From 1b2d306aa07e77f89c2216402bbcd6f5ca65de74 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 6 Sep 2023 12:35:55 +0300 Subject: [PATCH 01/54] Log File Options (#1129) * Log File Options --- .gitlab-ci.yml | 1 + cli/bootnode/boot_node.go | 12 ++++++- cli/config/config.go | 2 ++ cli/export_keys_from_mnemonic.go | 2 +- cli/generate_operator_keys.go | 5 +-- cli/operator/node.go | 12 ++++++- cli/threshold.go | 2 +- integration/qbft/tests/setup_test.go | 2 +- logging/global.go | 49 +++++++++++++++++----------- logging/testing.go | 4 +-- 10 files changed, 63 insertions(+), 28 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1b575794ff..afcc42e934 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -176,3 +176,4 @@ Deploy exporter to prod: only: - main + diff --git a/cli/bootnode/boot_node.go b/cli/bootnode/boot_node.go index 21fb483f32..ddf69d71e4 100644 --- a/cli/bootnode/boot_node.go +++ b/cli/bootnode/boot_node.go @@ -31,7 +31,17 @@ var StartBootNodeCmd = &cobra.Command{ log.Fatal(err) } - if err := logging.SetGlobalLogger(cfg.LogLevel, cfg.LogLevelFormat, cfg.LogFormat, cfg.LogFilePath); err != nil { + err := logging.SetGlobalLogger( + cfg.LogLevel, + cfg.LogLevelFormat, + cfg.LogFormat, + &logging.LogFileOptions{ + FileName: cfg.LogFilePath, + MaxSize: cfg.LogFileSize, + MaxBackups: cfg.LogFileBackups, + }, + ) + if err != nil { log.Fatal(err) } diff --git a/cli/config/config.go b/cli/config/config.go index bff083e8a5..0d074c9361 100644 --- a/cli/config/config.go +++ b/cli/config/config.go @@ -17,6 +17,8 @@ type GlobalConfig struct { LogFormat string `yaml:"LogFormat" env:"LOG_FORMAT" env-default:"console" env-description:"Defines logger's encoding, valid values are 'json' (default) and 'console''"` LogLevelFormat string `yaml:"LogLevelFormat" env:"LOG_LEVEL_FORMAT" env-default:"capitalColor" env-description:"Defines logger's level format, valid values are 'capitalColor' (default), 'capital' or 'lowercase''"` LogFilePath string `yaml:"LogFilePath" env:"LOG_FILE_PATH" env-default:"./data/debug.log" env-description:"Defines a file path to write logs into"` + LogFileSize int `yaml:"LogFileSize" env:"LOG_FILE_SIZE" env-default:"500" env-description:"Defines a file size in megabytes to rotate logs"` + LogFileBackups int `yaml:"LogFileBackups" env:"LOG_FILE_BACKUPS" env-default:"3" env-description:"Defines a number of backups to keep when rotating logs"` } // ProcessArgs processes and handles CLI arguments diff --git a/cli/export_keys_from_mnemonic.go b/cli/export_keys_from_mnemonic.go index 385ef3f73a..add9fc4a98 100644 --- a/cli/export_keys_from_mnemonic.go +++ b/cli/export_keys_from_mnemonic.go @@ -19,7 +19,7 @@ var exportKeysCmd = &cobra.Command{ Use: "export-keys", Short: "exports private/public keys based on given mnemonic", Run: func(cmd *cobra.Command, args []string) { - if err := logging.SetGlobalLogger("dpanic", "capital", "console", ""); err != nil { + if err := logging.SetGlobalLogger("dpanic", "capital", "console", nil); err != nil { log.Fatal(err) } diff --git a/cli/generate_operator_keys.go b/cli/generate_operator_keys.go index 7f846b25f9..d9a4a4b7a8 100644 --- a/cli/generate_operator_keys.go +++ b/cli/generate_operator_keys.go @@ -5,7 +5,8 @@ import ( "encoding/base64" "encoding/json" "encoding/pem" - "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" + + keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" "os" "path/filepath" @@ -84,7 +85,7 @@ var generateOperatorKeysCmd = &cobra.Command{ 
} } - if err := logging.SetGlobalLogger("debug", "capital", "console", ""); err != nil { + if err := logging.SetGlobalLogger("debug", "capital", "console", nil); err != nil { logger.Fatal("", zap.Error(err)) } diff --git a/cli/operator/node.go b/cli/operator/node.go index f7539575ad..4dd14f558a 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -334,7 +334,17 @@ func setupGlobal(cmd *cobra.Command) (*zap.Logger, error) { } } - if err := logging.SetGlobalLogger(cfg.LogLevel, cfg.LogLevelFormat, cfg.LogFormat, cfg.LogFilePath); err != nil { + err := logging.SetGlobalLogger( + cfg.LogLevel, + cfg.LogLevelFormat, + cfg.LogFormat, + &logging.LogFileOptions{ + FileName: cfg.LogFilePath, + MaxSize: cfg.LogFileSize, + MaxBackups: cfg.LogFileBackups, + }, + ) + if err != nil { return nil, fmt.Errorf("logging.SetGlobalLogger: %w", err) } diff --git a/cli/threshold.go b/cli/threshold.go index b161db709d..ca673eabaa 100644 --- a/cli/threshold.go +++ b/cli/threshold.go @@ -18,7 +18,7 @@ var createThresholdCmd = &cobra.Command{ Use: "create-threshold", Short: "Turns a private key into a threshold key", Run: func(cmd *cobra.Command, args []string) { - if err := logging.SetGlobalLogger("debug", "capital", "console", ""); err != nil { + if err := logging.SetGlobalLogger("debug", "capital", "console", nil); err != nil { log.Fatal(err) } logger := zap.L().Named(logging.NameCreateThreshold) diff --git a/integration/qbft/tests/setup_test.go b/integration/qbft/tests/setup_test.go index 3a5dcb4d39..f8c4222dbc 100644 --- a/integration/qbft/tests/setup_test.go +++ b/integration/qbft/tests/setup_test.go @@ -34,7 +34,7 @@ func GetSharedData(t *testing.T) SharedData { //singleton B-) func TestMain(m *testing.M) { ctx := context.Background() - if err := logging.SetGlobalLogger("debug", "capital", "console", ""); err != nil { + if err := logging.SetGlobalLogger("debug", "capital", "console", nil); err != nil { panic(err) } diff --git a/logging/global.go b/logging/global.go index 7ff7d8740f..11b0eb8ab7 100644 --- a/logging/global.go +++ b/logging/global.go @@ -13,19 +13,6 @@ import ( "go.uber.org/zap/zapcore" ) -// TODO: Log rotation out of the app -func getFileWriter(logFileName string) io.Writer { - fileLogger := &lumberjack.Logger{ - Filename: logFileName, - MaxSize: 500, // megabytes - MaxBackups: 3, - MaxAge: 28, // days - Compress: false, - } - - return fileLogger -} - func parseConfigLevel(levelName string) (zapcore.Level, error) { return zapcore.ParseLevel(levelName) } @@ -43,7 +30,17 @@ func parseConfigLevelEncoder(levelEncoderName string) zapcore.LevelEncoder { } } -func SetGlobalLogger(levelName string, levelEncoderName string, logFormat string, logFilePath string) error { +func SetGlobalLogger(levelName string, levelEncoderName string, logFormat string, fileOptions *LogFileOptions) (err error) { + defer func() { + if err == nil { + zap.L().Debug("logger is ready", + zap.String("level", levelName), + zap.String("encoder", levelEncoderName), + zap.String("format", logFormat), + zap.Any("file_options", fileOptions), + ) + } + }() level, err := parseConfigLevel(levelName) if err != nil { return err @@ -77,25 +74,39 @@ func SetGlobalLogger(levelName string, levelEncoderName string, logFormat string consoleCore := zapcore.NewCore(zapcore.NewConsoleEncoder(cfg.EncoderConfig), os.Stdout, lv) - if logFilePath == "" { + if fileOptions == nil { zap.ReplaceGlobals(zap.New(consoleCore)) return nil } - logFileWriter := getFileWriter(logFilePath) - lv2 := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool { 
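// The file core accepts every level so the rotating log file keeps full
// debug output even when the console core filters at a stricter level.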
return true // debug log returns all logs }) dev := zapcore.NewJSONEncoder(zap.NewDevelopmentEncoderConfig()) - fileCore := zapcore.NewCore(dev, zapcore.AddSync(logFileWriter), lv2) + fileWriter := fileOptions.writer(fileOptions) + fileCore := zapcore.NewCore(dev, zapcore.AddSync(fileWriter), lv2) zap.ReplaceGlobals(zap.New(zapcore.NewTee(consoleCore, fileCore))) - return nil } +type LogFileOptions struct { + FileName string + MaxSize int + MaxBackups int +} + +func (o LogFileOptions) writer(options *LogFileOptions) io.Writer { + return &lumberjack.Logger{ + Filename: options.FileName, + MaxSize: options.MaxSize, // megabytes + MaxBackups: options.MaxBackups, + MaxAge: 28, // days + Compress: false, + } +} + func CapturePanic(logger *zap.Logger) { if r := recover(); r != nil { // defer logger.Sync() diff --git a/logging/testing.go b/logging/testing.go index b73a22ec91..6b6abd8326 100644 --- a/logging/testing.go +++ b/logging/testing.go @@ -8,13 +8,13 @@ import ( ) func TestLogger(t *testing.T) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", "") + err := SetGlobalLogger("debug", "capital", "console", nil) require.NoError(t, err) return zap.L().Named(t.Name()) } func BenchLogger(b *testing.B) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", "") + err := SetGlobalLogger("debug", "capital", "console", nil) require.NoError(b, err) return zap.L().Named(b.Name()) } From 8c84309f9e609e6fa7d05b7f36423734323d782c Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 6 Sep 2023 13:58:30 +0300 Subject: [PATCH 02/54] fix: jammed `indicesChange` channel (#1131) * fix: jammed `indicesChange` channel --------- Co-authored-by: y0sher --- operator/duties/scheduler_test.go | 43 ++++++++++++++++++++++++ operator/duties/validatorregistration.go | 6 ++++ 2 files changed, 49 insertions(+) diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index b10b3ec2e2..8015b5e17c 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -237,3 +237,46 @@ func TestScheduler_Run(t *testing.T) { cancel() require.NoError(t, s.Wait()) } + +func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := logging.TestLogger(t) + + mockBeaconNode := mocks.NewMockBeaconNode(ctrl) + mockValidatorController := mocks.NewMockValidatorController(ctrl) + mockTicker := mockslotticker.NewMockTicker(ctrl) + // create multiple mock duty handlers + + opts := &SchedulerOptions{ + Ctx: ctx, + BeaconNode: mockBeaconNode, + Network: networkconfig.TestNetwork, + ValidatorController: mockValidatorController, + Ticker: mockTicker, + IndicesChg: make(chan struct{}), + + BuilderProposals: true, + } + + s := NewScheduler(opts) + + // add multiple mock duty handlers + s.handlers = []dutyHandler{NewValidatorRegistrationHandler()} + mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + err := s.Start(ctx, logger) + require.NoError(t, err) + + s.indicesChg <- struct{}{} // first time make fanout stuck + select { + case s.indicesChg <- struct{}{}: // second send should hang + break + default: + t.Fatal("Channel is jammed") + } + +} diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index 
cfefcbe7fb..2ac3a49ea3 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -70,6 +70,12 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)] = struct{}{} } h.logger.Debug("validator registration duties sent", zap.Uint64("slot", uint64(slot)), fields.Count(sent)) + + case <-h.indicesChange: + continue + + case <-h.reorg: + continue } } } From e8b10ab86ffc3ea33992b25a6d94b8fc0e73acee Mon Sep 17 00:00:00 2001 From: olegshmuelov <45327364+olegshmuelov@users.noreply.github.com> Date: Wed, 6 Sep 2023 14:00:43 +0300 Subject: [PATCH 03/54] Prevent Unnecessary Round Bumps by Validating Timeout Messages (#1079) --- .../v2/qbft/controller/controller_test.go | 67 ++++++++++++++++++- protocol/v2/qbft/controller/timer.go | 9 ++- protocol/v2/qbft/instance/metrics.go | 2 +- protocol/v2/qbft/roundtimer/timer.go | 9 +-- protocol/v2/qbft/roundtimer/timer_test.go | 4 +- protocol/v2/ssv/runner/timer.go | 2 +- protocol/v2/ssv/validator/timer.go | 14 ++-- protocol/v2/types/messages.go | 9 +-- 8 files changed, 95 insertions(+), 21 deletions(-) diff --git a/protocol/v2/qbft/controller/controller_test.go b/protocol/v2/qbft/controller/controller_test.go index 35c7a39d31..5adbe34f49 100644 --- a/protocol/v2/qbft/controller/controller_test.go +++ b/protocol/v2/qbft/controller/controller_test.go @@ -1,11 +1,17 @@ package controller import ( + "encoding/json" "testing" - "github.com/bloxapp/ssv/protocol/v2/qbft" - + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/protocol/v2/qbft" + "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/types" ) func TestController_Marshaling(t *testing.T) { @@ -25,3 +31,60 @@ func TestController_Marshaling(t *testing.T) { require.NoError(t, err) require.EqualValues(t, byts, bytsDecoded) } + +func TestController_OnTimeoutWithRoundCheck(t *testing.T) { + // Initialize logger + logger := logging.TestLogger(t) + + testConfig := &qbft.Config{ + Signer: spectestingutils.NewTestingKeyManager(), + Network: spectestingutils.NewTestingNetwork(), + Timer: spectestingutils.NewTestingTimer(), + } + + share := spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()) + inst := instance.NewInstance( + testConfig, + share, + []byte{1, 2, 3, 4}, + specqbft.FirstHeight, + ) + + // Initialize Controller + contr := &Controller{} + + // Initialize EventMsg for the test + timeoutData := types.TimeoutData{ + Height: specqbft.FirstHeight, + Round: specqbft.FirstRound, + } + + data, err := json.Marshal(timeoutData) + require.NoError(t, err) + + msg := &types.EventMsg{ + Type: types.Timeout, + Data: data, + } + + // Simulate a scenario where the instance is at a higher round + inst.State.Round = specqbft.Round(2) + contr.StoredInstances.addNewInstance(inst) + + // Call OnTimeout and capture the error + err = contr.OnTimeout(logger, *msg) + + // Assert that the error is nil and the round did not bump + require.NoError(t, err) + require.Equal(t, specqbft.Round(2), inst.State.Round, "Round should not bump") + + // Simulate a scenario where the instance is at the same or lower round + inst.State.Round = specqbft.FirstRound + + // Call OnTimeout and capture the error + err = contr.OnTimeout(logger, *msg) + + // Assert that the error is nil and the round did bump + 
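// The timeout round equals the instance round here, so the message is not
// treated as stale and UponRoundTimeout bumps the instance to the next round (2).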
require.NoError(t, err) + require.Equal(t, specqbft.Round(2), inst.State.Round, "Round should bump") +} diff --git a/protocol/v2/qbft/controller/timer.go b/protocol/v2/qbft/controller/timer.go index f073fa813c..fa3ff1e4db 100644 --- a/protocol/v2/qbft/controller/timer.go +++ b/protocol/v2/qbft/controller/timer.go @@ -19,8 +19,13 @@ func (c *Controller) OnTimeout(logger *zap.Logger, msg types.EventMsg) error { if instance == nil { return errors.New("instance is nil") } - decided, _ := instance.IsDecided() - if decided { + + if timeoutData.Round < instance.State.Round { + logger.Debug("timeout for old round", zap.Uint64("timeout round", uint64(timeoutData.Round)), zap.Uint64("instance round", uint64(instance.State.Round))) + return nil + } + + if decided, _ := instance.IsDecided(); decided { return nil } return instance.UponRoundTimeout(logger) diff --git a/protocol/v2/qbft/instance/metrics.go b/protocol/v2/qbft/instance/metrics.go index e2598671ad..5ffd41d05d 100644 --- a/protocol/v2/qbft/instance/metrics.go +++ b/protocol/v2/qbft/instance/metrics.go @@ -2,13 +2,13 @@ package instance import ( "encoding/hex" - "go.uber.org/zap" "time" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/zap" ) var ( diff --git a/protocol/v2/qbft/roundtimer/timer.go b/protocol/v2/qbft/roundtimer/timer.go index df0463e695..d82a80bc54 100644 --- a/protocol/v2/qbft/roundtimer/timer.go +++ b/protocol/v2/qbft/roundtimer/timer.go @@ -10,6 +10,7 @@ import ( ) type RoundTimeoutFunc func(specqbft.Round) time.Duration +type OnRoundTimeoutF func(round specqbft.Round) var ( quickTimeoutThreshold = specqbft.Round(8) @@ -36,7 +37,7 @@ type RoundTimer struct { // timer is the underlying time.Timer timer *time.Timer // result holds the result of the timer - done func() + done OnRoundTimeoutF // round is the current round of the timer round int64 @@ -44,7 +45,7 @@ type RoundTimer struct { } // New creates a new instance of RoundTimer. -func New(pctx context.Context, done func()) *RoundTimer { +func New(pctx context.Context, done OnRoundTimeoutF) *RoundTimer { ctx, cancelCtx := context.WithCancel(pctx) return &RoundTimer{ mtx: &sync.RWMutex{}, @@ -57,7 +58,7 @@ func New(pctx context.Context, done func()) *RoundTimer { } // OnTimeout sets a function called on timeout. 
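// The callback receives the round that timed out, which lets consumers such as
// Controller.OnTimeout discard timeouts for rounds the instance has already passed.
// A minimal usage sketch, assuming a zap *Logger named logger is in scope:
//
//	timer.OnTimeout(func(round specqbft.Round) {
//		logger.Debug("round timed out", zap.Uint64("round", uint64(round)))
//	})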
-func (t *RoundTimer) OnTimeout(done func()) { +func (t *RoundTimer) OnTimeout(done OnRoundTimeoutF) { t.mtx.Lock() // write to t.done defer t.mtx.Unlock() @@ -101,7 +102,7 @@ func (t *RoundTimer) waitForRound(round specqbft.Round, timeout <-chan time.Time t.mtx.RLock() // read t.done defer t.mtx.RUnlock() if done := t.done; done != nil { - done() + done(round) } }() } diff --git a/protocol/v2/qbft/roundtimer/timer_test.go b/protocol/v2/qbft/roundtimer/timer_test.go index 8c41410db1..d5995ef5c1 100644 --- a/protocol/v2/qbft/roundtimer/timer_test.go +++ b/protocol/v2/qbft/roundtimer/timer_test.go @@ -13,7 +13,7 @@ import ( func TestRoundTimer_TimeoutForRound(t *testing.T) { t.Run("TimeoutForRound", func(t *testing.T) { count := int32(0) - onTimeout := func() { + onTimeout := func(round specqbft.Round) { atomic.AddInt32(&count, 1) } timer := New(context.Background(), onTimeout) @@ -28,7 +28,7 @@ func TestRoundTimer_TimeoutForRound(t *testing.T) { t.Run("timeout round before elapsed", func(t *testing.T) { count := int32(0) - onTimeout := func() { + onTimeout := func(round specqbft.Round) { atomic.AddInt32(&count, 1) } timer := New(context.Background(), onTimeout) diff --git a/protocol/v2/ssv/runner/timer.go b/protocol/v2/ssv/runner/timer.go index 9d8e4a315f..51e25ccbf6 100644 --- a/protocol/v2/ssv/runner/timer.go +++ b/protocol/v2/ssv/runner/timer.go @@ -9,7 +9,7 @@ import ( "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" ) -type TimeoutF func(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) func() +type TimeoutF func(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) roundtimer.OnRoundTimeoutF func (b *BaseRunner) registerTimeoutHandler(logger *zap.Logger, instance *instance.Instance, height specqbft.Height) { identifier := spectypes.MessageIDFromBytes(instance.State.ID) diff --git a/protocol/v2/ssv/validator/timer.go b/protocol/v2/ssv/validator/timer.go index 87013bd5dd..6fea69d2fb 100644 --- a/protocol/v2/ssv/validator/timer.go +++ b/protocol/v2/ssv/validator/timer.go @@ -10,12 +10,13 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/types" ) -func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) func() { - return func() { +func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) roundtimer.OnRoundTimeoutF { + return func(round specqbft.Round) { v.mtx.RLock() // read-lock for v.Queues, v.state defer v.mtx.RUnlock() @@ -30,7 +31,7 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID return } - msg, err := v.createTimerMessage(identifier, height) + msg, err := v.createTimerMessage(identifier, height, round) if err != nil { logger.Debug("❗ failed to create timer msg", zap.Error(err)) return @@ -49,8 +50,11 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID } } -func (v *Validator) createTimerMessage(identifier spectypes.MessageID, height specqbft.Height) (*spectypes.SSVMessage, error) { - td := types.TimeoutData{Height: height} +func (v *Validator) createTimerMessage(identifier spectypes.MessageID, height specqbft.Height, round specqbft.Round) (*spectypes.SSVMessage, error) { + td := types.TimeoutData{ + Height: height, + Round: round, + } data, err := json.Marshal(td) if err != nil { return 
nil, errors.Wrap(err, "failed to marshal timeout data") diff --git a/protocol/v2/types/messages.go b/protocol/v2/types/messages.go index 121194142d..529b2ab821 100644 --- a/protocol/v2/types/messages.go +++ b/protocol/v2/types/messages.go @@ -34,6 +34,7 @@ type EventMsg struct { type TimeoutData struct { Height qbft.Height + Round qbft.Round } type ExecuteDutyData struct { @@ -57,11 +58,11 @@ func (m *EventMsg) GetExecuteDutyData() (*ExecuteDutyData, error) { } // Encode returns a msg encoded bytes or error -func (msg *EventMsg) Encode() ([]byte, error) { - return json.Marshal(msg) +func (m *EventMsg) Encode() ([]byte, error) { + return json.Marshal(m) } // Decode returns error if decoding failed -func (msg *EventMsg) Decode(data []byte) error { - return json.Unmarshal(data, &msg) +func (m *EventMsg) Decode(data []byte) error { + return json.Unmarshal(data, &m) } From d2deb8b00c0d29b90df762f1b7a17a750c7b96a8 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 6 Sep 2023 14:18:23 +0300 Subject: [PATCH 04/54] disable BUILDER_PROPOSALS on stage nodes 5 & 7 (#1123) --- .k8/stage/ssv-node-v2-5-deployment.yml | 2 +- .k8/stage/ssv-node-v2-7-deployment.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.k8/stage/ssv-node-v2-5-deployment.yml b/.k8/stage/ssv-node-v2-5-deployment.yml index 8e0a8436a8..70a7f89f72 100644 --- a/.k8/stage/ssv-node-v2-5-deployment.yml +++ b/.k8/stage/ssv-node-v2-5-deployment.yml @@ -120,7 +120,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-v2-5 diff --git a/.k8/stage/ssv-node-v2-7-deployment.yml b/.k8/stage/ssv-node-v2-7-deployment.yml index b4a4b93e72..bb3488b41d 100644 --- a/.k8/stage/ssv-node-v2-7-deployment.yml +++ b/.k8/stage/ssv-node-v2-7-deployment.yml @@ -122,7 +122,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-v2-7 From 97569fb7ba8353e33369d1e2484a3bedeb2c520c Mon Sep 17 00:00:00 2001 From: rehs0y Date: Wed, 6 Sep 2023 14:19:53 +0300 Subject: [PATCH 05/54] add timeout to make sure channel is blocked. 
(#1133) --- operator/duties/scheduler_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index 8015b5e17c..342ba9e0cd 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -275,7 +275,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { select { case s.indicesChg <- struct{}{}: // second send should hang break - default: + case <-time.After(1 * time.Second): t.Fatal("Channel is jammed") } From ab6b07280c1e1e80e10e183119d65fe6d599969a Mon Sep 17 00:00:00 2001 From: Nikita Kryuchkov Date: Wed, 6 Sep 2023 15:44:43 +0400 Subject: [PATCH 06/54] metrics: fix round distribution (#1014) * Fix round distribution metrics --------- Co-authored-by: moshe-blox --- .../dashboard_ssv_operator_performance.json | 20 +++++++++---------- protocol/v2/qbft/instance/metrics.go | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/monitoring/grafana/dashboard_ssv_operator_performance.json b/monitoring/grafana/dashboard_ssv_operator_performance.json index 1ba7c2714f..ce769ee03d 100644 --- a/monitoring/grafana/dashboard_ssv_operator_performance.json +++ b/monitoring/grafana/dashboard_ssv_operator_performance.json @@ -229,7 +229,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -268,7 +268,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -2033,7 +2033,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -2072,7 +2072,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -3405,7 +3405,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -3444,7 +3444,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -4777,7 +4777,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -4816,7 +4816,7 @@ "uid": 
"eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -5969,7 +5969,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -6008,7 +6008,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", "hide": false, "instant": true, "interval": "", diff --git a/protocol/v2/qbft/instance/metrics.go b/protocol/v2/qbft/instance/metrics.go index 5ffd41d05d..e32e49a872 100644 --- a/protocol/v2/qbft/instance/metrics.go +++ b/protocol/v2/qbft/instance/metrics.go @@ -50,7 +50,7 @@ func newMetrics(msgID spectypes.MessageID) *metrics { proposalDuration: metricsStageDuration.WithLabelValues("proposal", hexPubKey), prepareDuration: metricsStageDuration.WithLabelValues("prepare", hexPubKey), commitDuration: metricsStageDuration.WithLabelValues("commit", hexPubKey), - round: metricsRound.WithLabelValues("validator", hexPubKey), + round: metricsRound.WithLabelValues(msgID.GetRoleType().String(), hexPubKey), } } From 013b88b4b2bf21325e56f81d275be6e0ef141211 Mon Sep 17 00:00:00 2001 From: olegshmuelov <45327364+olegshmuelov@users.noreply.github.com> Date: Thu, 7 Sep 2023 14:16:18 +0300 Subject: [PATCH 07/54] Deterministic Round Timeout (#1120) * implementation * deprecate spec timer interface * adjust round timeout func * add TODO SIP comments * add timeout for first round * fix test * add test * adjust tests with custom time at slot func * use beacon network interface in timer * make all roles deterministic except proposer * fix merge conflicts * renaming + rearranging to simplify * spec alignment --------- Co-authored-by: moshe-blox --- integration/qbft/tests/round_change_test.go | 9 +- operator/validator/controller.go | 24 +-- protocol/v2/qbft/config.go | 8 +- .../v2/qbft/controller/controller_test.go | 3 +- protocol/v2/qbft/instance/instance.go | 2 +- protocol/v2/qbft/instance/proposal.go | 2 +- protocol/v2/qbft/instance/round_change.go | 4 +- protocol/v2/qbft/instance/timeout.go | 5 +- protocol/v2/qbft/roundtimer/mocks/timer.go | 100 ++++++++++ protocol/v2/qbft/roundtimer/testing_timer.go | 23 +++ protocol/v2/qbft/roundtimer/timer.go | 123 ++++++++++-- protocol/v2/qbft/roundtimer/timer_test.go | 184 +++++++++++++++--- protocol/v2/qbft/spectest/controller_type.go | 3 +- protocol/v2/qbft/spectest/timeout_type.go | 5 +- protocol/v2/qbft/testing/utils.go | 5 +- protocol/v2/ssv/testing/validator.go | 3 +- protocol/v2/ssv/validator/opts.go | 3 +- scripts/spec-alignment/differ.config.yaml | 3 +- 18 files changed, 426 insertions(+), 83 deletions(-) create mode 100644 protocol/v2/qbft/roundtimer/mocks/timer.go create mode 100644 protocol/v2/qbft/roundtimer/testing_timer.go diff --git a/integration/qbft/tests/round_change_test.go b/integration/qbft/tests/round_change_test.go index 65c6038e5f..4dbb839f5f 100644 --- 
a/integration/qbft/tests/round_change_test.go +++ b/integration/qbft/tests/round_change_test.go @@ -2,12 +2,13 @@ package tests import ( "testing" + "time" "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" - protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" "github.com/stretchr/testify/require" + + protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) func TestRoundChange4CommitteeScenario(t *testing.T) { @@ -18,8 +19,8 @@ func TestRoundChange4CommitteeScenario(t *testing.T) { Duties: map[spectypes.OperatorID]DutyProperties{ 2: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: NoDelay}, 1: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: NoDelay}, - 3: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: roundtimer.RoundTimeout(1)}, - 4: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: roundtimer.RoundTimeout(1)}, + 3: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: 2 * time.Second}, + 4: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: 2 * time.Second}, }, ValidationFunctions: map[spectypes.OperatorID]func(*testing.T, int, *protocolstorage.StoredInstance){ 1: roundChangeValidator(), diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 17dcfddc82..1ec3efa634 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -170,7 +170,7 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { validatorOptions := &validator.Options{ //TODO add vars Network: options.Network, Beacon: options.Beacon, - BeaconNetwork: options.BeaconNetwork.BeaconNetwork, + BeaconNetwork: options.BeaconNetwork.GetNetwork(), Storage: options.StorageMap, //Share: nil, // set per validator Signer: options.KeyManager, @@ -809,7 +809,7 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt }, Storage: options.Storage.Get(role), Network: options.Network, - Timer: roundtimer.New(ctx, nil), + Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), } config.ValueCheckF = valueCheckF @@ -823,26 +823,26 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt for _, role := range runnersType { switch role { case spectypes.BNRoleAttester: - valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) + valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.BNRoleAttester, valCheck) - runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) + runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) case spectypes.BNRoleProposer: - proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey, options.BuilderProposals) + proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, 
options.SSVShare.SharePubKey, options.BuilderProposals) qbftCtrl := buildController(spectypes.BNRoleProposer, proposedValueCheck) - runners[role] = runner.NewProposerRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) + runners[role] = runner.NewProposerRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) runners[role].(*runner.ProposerRunner).ProducesBlindedBlocks = options.BuilderProposals // apply blinded block flag case spectypes.BNRoleAggregator: - aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleAggregator, aggregatorValueCheckF) - runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, aggregatorValueCheckF, 0) + runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, aggregatorValueCheckF, 0) case spectypes.BNRoleSyncCommittee: - syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleSyncCommittee, syncCommitteeValueCheckF) - runners[role] = runner.NewSyncCommitteeRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeValueCheckF, 0) case spectypes.BNRoleSyncCommitteeContribution: - syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleSyncCommitteeContribution, syncCommitteeContributionValueCheckF) - runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) case spectypes.BNRoleValidatorRegistration: qbftCtrl := buildController(spectypes.BNRoleValidatorRegistration, nil) runners[role] = 
runner.NewValidatorRegistrationRunner(spectypes.PraterNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) diff --git a/protocol/v2/qbft/config.go b/protocol/v2/qbft/config.go index 580b3b03e2..2698d2877e 100644 --- a/protocol/v2/qbft/config.go +++ b/protocol/v2/qbft/config.go @@ -3,6 +3,8 @@ package qbft import ( specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) @@ -24,7 +26,7 @@ type IConfig interface { // GetStorage returns a storage instance GetStorage() qbftstorage.QBFTStore // GetTimer returns round timer - GetTimer() specqbft.Timer + GetTimer() roundtimer.Timer } type Config struct { @@ -35,7 +37,7 @@ type Config struct { ProposerF specqbft.ProposerF Storage qbftstorage.QBFTStore Network specqbft.Network - Timer specqbft.Timer + Timer roundtimer.Timer } // GetSigner returns a Signer instance @@ -74,6 +76,6 @@ func (c *Config) GetStorage() qbftstorage.QBFTStore { } // GetTimer returns round timer -func (c *Config) GetTimer() specqbft.Timer { +func (c *Config) GetTimer() roundtimer.Timer { return c.Timer } diff --git a/protocol/v2/qbft/controller/controller_test.go b/protocol/v2/qbft/controller/controller_test.go index 5adbe34f49..cd119c9d86 100644 --- a/protocol/v2/qbft/controller/controller_test.go +++ b/protocol/v2/qbft/controller/controller_test.go @@ -11,6 +11,7 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -39,7 +40,7 @@ func TestController_OnTimeoutWithRoundCheck(t *testing.T) { testConfig := &qbft.Config{ Signer: spectestingutils.NewTestingKeyManager(), Network: spectestingutils.NewTestingNetwork(), - Timer: spectestingutils.NewTestingTimer(), + Timer: roundtimer.NewTestingTimer(), } share := spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()) diff --git a/protocol/v2/qbft/instance/instance.go b/protocol/v2/qbft/instance/instance.go index 2513268e25..7441a1df30 100644 --- a/protocol/v2/qbft/instance/instance.go +++ b/protocol/v2/qbft/instance/instance.go @@ -66,7 +66,7 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh i.State.Height = height i.metrics.StartStage() - i.config.GetTimer().TimeoutForRound(specqbft.FirstRound) + i.config.GetTimer().TimeoutForRound(height, specqbft.FirstRound) logger = logger.With( fields.Round(i.State.Round), diff --git a/protocol/v2/qbft/instance/proposal.go b/protocol/v2/qbft/instance/proposal.go index a417c04fc4..0b112756c1 100644 --- a/protocol/v2/qbft/instance/proposal.go +++ b/protocol/v2/qbft/instance/proposal.go @@ -33,7 +33,7 @@ func (i *Instance) uponProposal(logger *zap.Logger, signedProposal *specqbft.Sig // A future justified proposal should bump us into future round and reset timer if signedProposal.Message.Round > i.State.Round { - i.config.GetTimer().TimeoutForRound(signedProposal.Message.Round) + i.config.GetTimer().TimeoutForRound(signedProposal.Message.Height, signedProposal.Message.Round) } i.bumpToRound(newRound) diff --git a/protocol/v2/qbft/instance/round_change.go b/protocol/v2/qbft/instance/round_change.go index 5b0de2e3c9..0fb7a54486 100644 --- a/protocol/v2/qbft/instance/round_change.go +++ b/protocol/v2/qbft/instance/round_change.go @@ -85,7 +85,9 @@ func (i *Instance) 
uponRoundChange( func (i *Instance) uponChangeRoundPartialQuorum(logger *zap.Logger, newRound specqbft.Round, instanceStartValue []byte) error { i.bumpToRound(newRound) i.State.ProposalAcceptedForCurrentRound = nil - i.config.GetTimer().TimeoutForRound(i.State.Round) + + i.config.GetTimer().TimeoutForRound(i.State.Height, i.State.Round) + roundChange, err := CreateRoundChange(i.State, i.config, newRound, instanceStartValue) if err != nil { return errors.Wrap(err, "failed to create round change message") diff --git a/protocol/v2/qbft/instance/timeout.go b/protocol/v2/qbft/instance/timeout.go index ee8e9248b7..62ae4c784c 100644 --- a/protocol/v2/qbft/instance/timeout.go +++ b/protocol/v2/qbft/instance/timeout.go @@ -1,9 +1,10 @@ package instance import ( - "github.com/bloxapp/ssv/logging/fields" "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/logging/fields" ) var CutoffRound = 15 // stop processing instances after 8*2+120*6 = 14.2 min (~ 2 epochs) @@ -22,7 +23,7 @@ func (i *Instance) UponRoundTimeout(logger *zap.Logger) error { defer func() { i.bumpToRound(newRound) i.State.ProposalAcceptedForCurrentRound = nil - i.config.GetTimer().TimeoutForRound(i.State.Round) + i.config.GetTimer().TimeoutForRound(i.State.Height, i.State.Round) }() roundChange, err := CreateRoundChange(i.State, i.config, newRound, i.StartValue) diff --git a/protocol/v2/qbft/roundtimer/mocks/timer.go b/protocol/v2/qbft/roundtimer/mocks/timer.go new file mode 100644 index 0000000000..2a691f9ab6 --- /dev/null +++ b/protocol/v2/qbft/roundtimer/mocks/timer.go @@ -0,0 +1,100 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./timer.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + time "time" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + qbft "github.com/bloxapp/ssv-spec/qbft" + gomock "github.com/golang/mock/gomock" +) + +// MockTimer is a mock of Timer interface. +type MockTimer struct { + ctrl *gomock.Controller + recorder *MockTimerMockRecorder +} + +// MockTimerMockRecorder is the mock recorder for MockTimer. +type MockTimerMockRecorder struct { + mock *MockTimer +} + +// NewMockTimer creates a new mock instance. +func NewMockTimer(ctrl *gomock.Controller) *MockTimer { + mock := &MockTimer{ctrl: ctrl} + mock.recorder = &MockTimerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTimer) EXPECT() *MockTimerMockRecorder { + return m.recorder +} + +// TimeoutForRound mocks base method. +func (m *MockTimer) TimeoutForRound(height qbft.Height, round qbft.Round) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "TimeoutForRound", height, round) +} + +// TimeoutForRound indicates an expected call of TimeoutForRound. +func (mr *MockTimerMockRecorder) TimeoutForRound(height, round interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeoutForRound", reflect.TypeOf((*MockTimer)(nil).TimeoutForRound), height, round) +} + +// MockBeaconNetwork is a mock of BeaconNetwork interface. +type MockBeaconNetwork struct { + ctrl *gomock.Controller + recorder *MockBeaconNetworkMockRecorder +} + +// MockBeaconNetworkMockRecorder is the mock recorder for MockBeaconNetwork. +type MockBeaconNetworkMockRecorder struct { + mock *MockBeaconNetwork +} + +// NewMockBeaconNetwork creates a new mock instance. 
+func NewMockBeaconNetwork(ctrl *gomock.Controller) *MockBeaconNetwork { + mock := &MockBeaconNetwork{ctrl: ctrl} + mock.recorder = &MockBeaconNetworkMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBeaconNetwork) EXPECT() *MockBeaconNetworkMockRecorder { + return m.recorder +} + +// GetSlotStartTime mocks base method. +func (m *MockBeaconNetwork) GetSlotStartTime(slot phase0.Slot) time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSlotStartTime", slot) + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetSlotStartTime indicates an expected call of GetSlotStartTime. +func (mr *MockBeaconNetworkMockRecorder) GetSlotStartTime(slot interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSlotStartTime", reflect.TypeOf((*MockBeaconNetwork)(nil).GetSlotStartTime), slot) +} + +// SlotDurationSec mocks base method. +func (m *MockBeaconNetwork) SlotDurationSec() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SlotDurationSec") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// SlotDurationSec indicates an expected call of SlotDurationSec. +func (mr *MockBeaconNetworkMockRecorder) SlotDurationSec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SlotDurationSec", reflect.TypeOf((*MockBeaconNetwork)(nil).SlotDurationSec)) +} diff --git a/protocol/v2/qbft/roundtimer/testing_timer.go b/protocol/v2/qbft/roundtimer/testing_timer.go new file mode 100644 index 0000000000..310a072aa3 --- /dev/null +++ b/protocol/v2/qbft/roundtimer/testing_timer.go @@ -0,0 +1,23 @@ +package roundtimer + +import specqbft "github.com/bloxapp/ssv-spec/qbft" + +type TimerState struct { + Timeouts int + Round specqbft.Round +} + +type TestQBFTTimer struct { + State TimerState +} + +func NewTestingTimer() Timer { + return &TestQBFTTimer{ + State: TimerState{}, + } +} + +func (t *TestQBFTTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { + t.State.Timeouts++ + t.State.Round = round +} diff --git a/protocol/v2/qbft/roundtimer/timer.go b/protocol/v2/qbft/roundtimer/timer.go index d82a80bc54..4890b1d27f 100644 --- a/protocol/v2/qbft/roundtimer/timer.go +++ b/protocol/v2/qbft/roundtimer/timer.go @@ -6,26 +6,36 @@ import ( "sync/atomic" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" ) -type RoundTimeoutFunc func(specqbft.Round) time.Duration +//go:generate mockgen -package=mocks -destination=./mocks/timer.go -source=./timer.go + type OnRoundTimeoutF func(round specqbft.Round) -var ( +const ( quickTimeoutThreshold = specqbft.Round(8) quickTimeout = 2 * time.Second slowTimeout = 2 * time.Minute ) -// RoundTimeout returns the number of seconds until next timeout for a give round. 
-// if the round is smaller than 8 -> 2s; otherwise -> 2m -// see SIP https://github.com/bloxapp/SIPs/pull/22 -func RoundTimeout(r specqbft.Round) time.Duration { - if r <= quickTimeoutThreshold { - return quickTimeout - } - return slowTimeout +// Timer is an interface for a round timer, calling the UponRoundTimeout when times out +type Timer interface { + // TimeoutForRound will reset running timer if exists and will start a new timer for a specific round + TimeoutForRound(height specqbft.Height, round specqbft.Round) +} + +type BeaconNetwork interface { + GetSlotStartTime(slot phase0.Slot) time.Time + SlotDurationSec() time.Duration +} + +type TimeoutOptions struct { + quickThreshold specqbft.Round + quick time.Duration + slow time.Duration } // RoundTimer helps to manage current instance rounds. @@ -40,23 +50,93 @@ type RoundTimer struct { done OnRoundTimeoutF // round is the current round of the timer round int64 - - roundTimeout RoundTimeoutFunc + // timeoutOptions holds the timeoutOptions for the timer + timeoutOptions TimeoutOptions + // role is the role of the instance + role spectypes.BeaconRole + // beaconNetwork is the beacon network + beaconNetwork BeaconNetwork } // New creates a new instance of RoundTimer. -func New(pctx context.Context, done OnRoundTimeoutF) *RoundTimer { +func New(pctx context.Context, beaconNetwork BeaconNetwork, role spectypes.BeaconRole, done OnRoundTimeoutF) *RoundTimer { ctx, cancelCtx := context.WithCancel(pctx) return &RoundTimer{ - mtx: &sync.RWMutex{}, - ctx: ctx, - cancelCtx: cancelCtx, - timer: nil, - done: done, - roundTimeout: RoundTimeout, + mtx: &sync.RWMutex{}, + ctx: ctx, + cancelCtx: cancelCtx, + timer: nil, + done: done, + role: role, + beaconNetwork: beaconNetwork, + timeoutOptions: TimeoutOptions{ + quickThreshold: quickTimeoutThreshold, + quick: quickTimeout, + slow: slowTimeout, + }, } } +// RoundTimeout calculates the timeout duration for a specific role, height, and round. +// +// Timeout Rules: +// - For roles BNRoleAttester and BNRoleSyncCommittee, the base timeout is 1/3 of the slot duration. +// - For roles BNRoleAggregator and BNRoleSyncCommitteeContribution, the base timeout is 2/3 of the slot duration. +// - For role BNRoleProposer, the timeout is either quickTimeout or slowTimeout, depending on the round. +// +// Additional Timeout: +// - For rounds less than or equal to quickThreshold, the additional timeout is 'quick' seconds. +// - For rounds greater than quickThreshold, the additional timeout is 'slow' seconds. +// +// SIP Reference: +// For more details, see SIP at https://github.com/bloxapp/SIPs/pull/22 +// +// TODO: Update SIP for Deterministic Round Timeout +// TODO: Decide if to make the proposer timeout deterministic +// +// Synchronization Note: +// To ensure synchronized timeouts across instances, the timeout is based on the duty start time, +// which is calculated from the slot height. The base timeout is set based on the role, +// and the additional timeout is added based on the round number. 
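// As a worked example under the defaults above (quick = 2s, slow = 2m) and
// assuming mainnet's 12-second slots: an attester at round 3 times out
// 12/3 + 3*2 = 10s after the slot start, while at round 10 the additional
// term grows to 8*2s + 2*2m = 256s, i.e. a timeout 260s after the slot start.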
+func (t *RoundTimer) RoundTimeout(height specqbft.Height, round specqbft.Round) time.Duration { + // Initialize duration to zero + var baseDuration time.Duration + + // Set base duration based on role + switch t.role { + case spectypes.BNRoleAttester, spectypes.BNRoleSyncCommittee: + // third of the slot time + baseDuration = t.beaconNetwork.SlotDurationSec() / 3 + case spectypes.BNRoleAggregator, spectypes.BNRoleSyncCommitteeContribution: + // two-third of the slot time + baseDuration = t.beaconNetwork.SlotDurationSec() / 3 * 2 + default: + if round <= t.timeoutOptions.quickThreshold { + return t.timeoutOptions.quick + } + return t.timeoutOptions.slow + } + + // Calculate additional timeout based on round + var additionalTimeout time.Duration + if round <= t.timeoutOptions.quickThreshold { + additionalTimeout = time.Duration(int(round)) * t.timeoutOptions.quick + } else { + quickPortion := time.Duration(t.timeoutOptions.quickThreshold) * t.timeoutOptions.quick + slowPortion := time.Duration(int(round-t.timeoutOptions.quickThreshold)) * t.timeoutOptions.slow + additionalTimeout = quickPortion + slowPortion + } + + // Combine base duration and additional timeout + timeoutDuration := baseDuration + additionalTimeout + + // Get the start time of the duty + dutyStartTime := t.beaconNetwork.GetSlotStartTime(phase0.Slot(height)) + + // Calculate the time until the duty should start plus the timeout duration + return time.Until(dutyStartTime.Add(timeoutDuration)) +} + // OnTimeout sets a function called on timeout. func (t *RoundTimer) OnTimeout(done OnRoundTimeoutF) { t.mtx.Lock() // write to t.done @@ -71,9 +151,10 @@ func (t *RoundTimer) Round() specqbft.Round { } // TimeoutForRound times out for a given round. -func (t *RoundTimer) TimeoutForRound(round specqbft.Round) { +func (t *RoundTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { atomic.StoreInt64(&t.round, int64(round)) - timeout := t.roundTimeout(round) + timeout := t.RoundTimeout(height, round) + // preparing the underlying timer timer := t.timer if timer == nil { diff --git a/protocol/v2/qbft/roundtimer/timer_test.go b/protocol/v2/qbft/roundtimer/timer_test.go index d5995ef5c1..25ce776631 100644 --- a/protocol/v2/qbft/roundtimer/timer_test.go +++ b/protocol/v2/qbft/roundtimer/timer_test.go @@ -2,45 +2,167 @@ package roundtimer import ( "context" + "fmt" + "sync" "sync/atomic" "testing" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer/mocks" ) -func TestRoundTimer_TimeoutForRound(t *testing.T) { - t.Run("TimeoutForRound", func(t *testing.T) { - count := int32(0) - onTimeout := func(round specqbft.Round) { - atomic.AddInt32(&count, 1) - } - timer := New(context.Background(), onTimeout) - timer.roundTimeout = func(round specqbft.Round) time.Duration { - return 1100 * time.Millisecond - } - timer.TimeoutForRound(specqbft.Round(1)) - require.Equal(t, int32(0), atomic.LoadInt32(&count)) - <-time.After(timer.roundTimeout(specqbft.Round(1)) + time.Millisecond*10) - require.Equal(t, int32(1), atomic.LoadInt32(&count)) - }) - - t.Run("timeout round before elapsed", func(t *testing.T) { - count := int32(0) - onTimeout := func(round specqbft.Round) { - atomic.AddInt32(&count, 1) - } - timer := New(context.Background(), onTimeout) - timer.roundTimeout = func(round specqbft.Round) time.Duration { - 
return 1100 * time.Millisecond +func TestTimeoutForRound(t *testing.T) { + roles := []spectypes.BeaconRole{ + spectypes.BNRoleAttester, + spectypes.BNRoleAggregator, + spectypes.BNRoleProposer, + spectypes.BNRoleSyncCommittee, + spectypes.BNRoleSyncCommitteeContribution, + } + + for _, role := range roles { + t.Run(fmt.Sprintf("TimeoutForRound - %s: <= quickTimeoutThreshold", role), func(t *testing.T) { + testTimeoutForRound(t, role, specqbft.Round(1)) + }) + + t.Run(fmt.Sprintf("TimeoutForRound - %s: > quickTimeoutThreshold", role), func(t *testing.T) { + testTimeoutForRound(t, role, specqbft.Round(2)) + }) + + t.Run(fmt.Sprintf("TimeoutForRound - %s: before elapsed", role), func(t *testing.T) { + testTimeoutForRoundElapsed(t, role, specqbft.Round(2)) + }) + + // TODO: Decide if to make the proposer timeout deterministic + // Proposer role is not tested for multiple synchronized timers since it's not deterministic + if role == spectypes.BNRoleProposer { + continue } - timer.TimeoutForRound(specqbft.Round(1)) - <-time.After(timer.roundTimeout(specqbft.Round(1)) / 2) - timer.TimeoutForRound(specqbft.Round(2)) // reset before elapsed - require.Equal(t, int32(0), atomic.LoadInt32(&count)) - <-time.After(timer.roundTimeout(specqbft.Round(2)) + time.Millisecond*10) - require.Equal(t, int32(1), atomic.LoadInt32(&count)) - }) + t.Run(fmt.Sprintf("TimeoutForRound - %s: multiple synchronized timers", role), func(t *testing.T) { + testTimeoutForRoundMulti(t, role, specqbft.Round(1)) + }) + } +} + +func setupMockBeaconNetwork(t *testing.T) *mocks.MockBeaconNetwork { + ctrl := gomock.NewController(t) + mockBeaconNetwork := mocks.NewMockBeaconNetwork(ctrl) + + mockBeaconNetwork.EXPECT().SlotDurationSec().Return(120 * time.Millisecond).AnyTimes() + mockBeaconNetwork.EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) time.Time { + return time.Now() + }, + ).AnyTimes() + return mockBeaconNetwork +} + +func setupTimer(mockBeaconNetwork *mocks.MockBeaconNetwork, onTimeout OnRoundTimeoutF, role spectypes.BeaconRole, round specqbft.Round) *RoundTimer { + timer := New(context.Background(), mockBeaconNetwork, role, onTimeout) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: round, + quick: 100 * time.Millisecond, + slow: 200 * time.Millisecond, + } + + return timer +} + +func testTimeoutForRound(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + mockBeaconNetwork := setupMockBeaconNetwork(t) + + count := int32(0) + onTimeout := func(round specqbft.Round) { + atomic.AddInt32(&count, 1) + } + + timer := setupTimer(mockBeaconNetwork, onTimeout, role, threshold) + + timer.TimeoutForRound(specqbft.FirstHeight, threshold) + require.Equal(t, int32(0), atomic.LoadInt32(&count)) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, threshold) + time.Millisecond*10) + require.Equal(t, int32(1), atomic.LoadInt32(&count)) +} + +func testTimeoutForRoundElapsed(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + mockBeaconNetwork := setupMockBeaconNetwork(t) + + count := int32(0) + onTimeout := func(round specqbft.Round) { + atomic.AddInt32(&count, 1) + } + + timer := setupTimer(mockBeaconNetwork, onTimeout, role, threshold) + + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.FirstRound) / 2) + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.Round(2)) // reset before elapsed + require.Equal(t, int32(0), atomic.LoadInt32(&count)) + 
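// Only the round-2 timer should fire: resetting before the first timeout
// elapsed cancels round 1's countdown, so the callback runs exactly once.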
<-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.Round(2)) + time.Millisecond*10) + require.Equal(t, int32(1), atomic.LoadInt32(&count)) +} + +func testTimeoutForRoundMulti(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + ctrl := gomock.NewController(t) + mockBeaconNetwork := mocks.NewMockBeaconNetwork(ctrl) + + var count int32 + var timestamps = make([]int64, 4) + var mu sync.Mutex + + onTimeout := func(index int) { + atomic.AddInt32(&count, 1) + mu.Lock() + timestamps[index] = time.Now().UnixNano() + mu.Unlock() + } + + timeNow := time.Now() + mockBeaconNetwork.EXPECT().SlotDurationSec().Return(100 * time.Millisecond).AnyTimes() + mockBeaconNetwork.EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) time.Time { + return timeNow + }, + ).AnyTimes() + + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + wg.Add(1) + go func(index int) { + timer := New(context.Background(), mockBeaconNetwork, role, func(round specqbft.Round) { onTimeout(index) }) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: threshold, + quick: 100 * time.Millisecond, + } + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound) + wg.Done() + }(i) + time.Sleep(time.Millisecond * 10) // Introduce a sleep between creating timers + } + + wg.Wait() // Wait for all go-routines to finish + + timer := New(context.Background(), mockBeaconNetwork, role, nil) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: specqbft.Round(1), + quick: 100 * time.Millisecond, + } + + // Wait a bit more than the expected timeout to ensure all timers have triggered + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.FirstRound) + time.Millisecond*100) + + require.Equal(t, int32(4), atomic.LoadInt32(&count), "All four timers should have triggered") + + mu.Lock() + for i := 1; i < 4; i++ { + require.InDelta(t, timestamps[0], timestamps[i], float64(time.Millisecond*10), "All four timers should expire nearly at the same time") + } + mu.Unlock() } diff --git a/protocol/v2/qbft/spectest/controller_type.go b/protocol/v2/qbft/spectest/controller_type.go index 0d32a545c2..b9e03e6197 100644 --- a/protocol/v2/qbft/spectest/controller_type.go +++ b/protocol/v2/qbft/spectest/controller_type.go @@ -16,6 +16,7 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" ) @@ -50,7 +51,7 @@ func testTimer( runData *spectests.RunInstanceData, ) { if runData.ExpectedTimerState != nil { - if timer, ok := config.GetTimer().(*spectestingutils.TestQBFTTimer); ok { + if timer, ok := config.GetTimer().(*roundtimer.TestQBFTTimer); ok { require.Equal(t, runData.ExpectedTimerState.Timeouts, timer.State.Timeouts) require.Equal(t, runData.ExpectedTimerState.Round, timer.State.Round) } diff --git a/protocol/v2/qbft/spectest/timeout_type.go b/protocol/v2/qbft/spectest/timeout_type.go index 637e1dd374..73b3fe7cde 100644 --- a/protocol/v2/qbft/spectest/timeout_type.go +++ b/protocol/v2/qbft/spectest/timeout_type.go @@ -7,8 +7,11 @@ import ( "github.com/bloxapp/ssv-spec/qbft" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + "github.com/stretchr/testify/require" ) @@ -32,7 +35,7 @@ func RunTimeout(t *testing.T, test *SpecTest) { } // test calling 
timeout - timer, ok := test.Pre.GetConfig().GetTimer().(*testingutils.TestQBFTTimer) + timer, ok := test.Pre.GetConfig().GetTimer().(*roundtimer.TestQBFTTimer) require.True(t, ok) require.Equal(t, test.ExpectedTimerState.Timeouts, timer.State.Timeouts) require.Equal(t, test.ExpectedTimerState.Round, timer.State.Round) diff --git a/protocol/v2/qbft/testing/utils.go b/protocol/v2/qbft/testing/utils.go index 35291f0acc..f07470c007 100644 --- a/protocol/v2/qbft/testing/utils.go +++ b/protocol/v2/qbft/testing/utils.go @@ -6,8 +6,11 @@ import ( specqbft "github.com/bloxapp/ssv-spec/qbft" "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + "github.com/pkg/errors" "go.uber.org/zap" ) @@ -33,7 +36,7 @@ var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, ro }, Storage: TestingStores(logger).Get(role), Network: testingutils.NewTestingNetwork(), - Timer: testingutils.NewTestingTimer(), + Timer: roundtimer.NewTestingTimer(), } } diff --git a/protocol/v2/ssv/testing/validator.go b/protocol/v2/ssv/testing/validator.go index 844145bd8c..d006111c2b 100644 --- a/protocol/v2/ssv/testing/validator.go +++ b/protocol/v2/ssv/testing/validator.go @@ -7,6 +7,7 @@ import ( spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "go.uber.org/zap" + "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/protocol/v2/qbft/testing" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" @@ -22,7 +23,7 @@ var BaseValidator = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet validator.Options{ Network: spectestingutils.NewTestingNetwork(), Beacon: spectestingutils.NewTestingBeaconNode(), - BeaconNetwork: spectypes.BeaconTestNetwork, + BeaconNetwork: networkconfig.TestNetwork.Beacon, Storage: testing.TestingStores(logger), SSVShare: &types.SSVShare{ Share: *spectestingutils.TestingShare(keySet), diff --git a/protocol/v2/ssv/validator/opts.go b/protocol/v2/ssv/validator/opts.go index e1085dead6..b219e58c6a 100644 --- a/protocol/v2/ssv/validator/opts.go +++ b/protocol/v2/ssv/validator/opts.go @@ -6,6 +6,7 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv/ibft/storage" + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" qbftctrl "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/types" @@ -19,7 +20,7 @@ const ( type Options struct { Network specqbft.Network Beacon specssv.BeaconNode - BeaconNetwork spectypes.BeaconNetwork + BeaconNetwork beacon.BeaconNetwork Storage *storage.QBFTStores SSVShare *types.SSVShare Signer spectypes.KeyManager diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index 641ad31360..70aa1a50e3 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -8,7 +8,8 @@ ApprovedChanges: ["256a3dc0f1eb7abf","22b66e9a63ba145b","12c1c3a1622fb7cc","1c44 "db32f358b6e8e2bb","f372e174e1f34c3b","bc47b3d202e8cd0d","86a6abca1a1c16d6","1655d21d5a4cad4","ac4e427097fc5533","6b4d5a114f8066ff", "9482fb9b6a953c48","5778a05e0976a6eb","24e2c7f54d5dd1d","2a8937e50d20faa9","587c629a67ef07ed","9d06d8e0ee4e1113","e624ec802068e711", 
"943be3ce709a99d3","5b3bb2d2262fe8be","c20c4c7ed8d1711d","b10c6fc7dd9eee7","c121cdaab6c1c698","e12b17f3910be26b","e47bf52e962c90af", - "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f"] + "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f", + "ae1b53fc580ce346","c117bd5db3eeabd6","d06552d71b9ca4cd","4cb333a88af66575"] IgnoredIdentifiers: - logger ReducedPackageNames: From 2c728cfedf66bf0e48b40a26bf658961ebf3c415 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Thu, 7 Sep 2023 18:17:27 +0300 Subject: [PATCH 08/54] LA Audit (#1136) * Audits Directory * add to README --- README.md | 2 ++ audits/Least Authority.pdf | Bin 0 -> 262065 bytes 2 files changed, 2 insertions(+) create mode 100644 audits/Least Authority.pdf diff --git a/README.md b/README.md index ba445430f5..c5446cc838 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,8 @@ to run validators in a decentralized and trustless way. * [Security proof for n-t honest parties](https://notes.ethereum.org/DYU-NrRBTxS3X0fu_MidnA) * [MEV Research - Block proposer/ builder separation in SSV](https://hackmd.io/DHt98PC_S_60NbnW4Wgssg) +### Audits +- [Least Authority](/audits/Least%20Authority.pdf) (August 2023) ## Getting Started diff --git a/audits/Least Authority.pdf b/audits/Least Authority.pdf new file mode 100644 index 0000000000000000000000000000000000000000..851cd132d4135340353f69bf408b002437420499 GIT binary patch literal 262065 zcmeGEWmMEr7e9)BXBZj=5F`X7MG&N62&It@MFGj7bLf;BLJ&|&q(xK&m6Vno3_?;+ zQbH-|?z(4up1XeQ|MI`qeRc0MuV(hyXUFI4bN1fnu(+bHa_hQ?pg7r;!R5u@WFqXs z?C#diWH)XI-F5PDwPWWMl@OK^6|+=wbq}yKFfe4-z<9XYVeH&|tbCl@-7MMfdDvR{ z*x9n*a&ohBWmojIb@E}qW9Q-S<-uY1@#m;Ns;$$TRbg^SZM5Nf&+}$03 za20nOZ@}zkYv*O>#V0362EO98i~S!fY^>Nt$b^)1*oAc6y)ag;|A$H9|1jMR^sr+W zGS;(pwzKge6Eg6%_W4KAz*srhkqH?&+4?xLi-<}AN$Pe^4vs$TBEl$9G9hJmS9dQ1 z4=WomTgA@L$;R%Eni80=W999_E(zq{0jiyRy!Gw8l-)5N?ry+sb}2HUTTZS%z$u|y zuE2E_I~#Xfpherx%>k%EiAsyf$+3I;c-dKD$O3X!CR!4>+e3&wwgMXEi?UZmw4W=) zR#s9e{3Sdff+KFiXbX`n_lf9)_5WP;5FpT{kQJe1_E^J(!xg2uh#m5R9_(E-GnW|r z)iBX8K2f^1GFP2?@@&NdH?G{Y`J5LBo7BG_OaV~|0~cfjbnes|oNL3#6DaE$&j<_b9h z1PK$H8$S6cab@b5aFWXKQQtd;-~hYIYp3Qg2$H(ZcT)2~YkXF#xwbYMRm3BiYN}%e zg!1^@JW0q1cE3xT{mCiPXVC%Ba5KYw@3TZtnyVFMzW&DibR@=0PdKXk{qZ_o=+qKP z3`OXQN_wa;;JtATBfoUmteo3kT*MgV!p8_fSQg~fu!Zj@P4{Cas{+3RCJNfE_0X*k zM*F2b&J00=Z(h&?qSn23Oq-F|_{mAQmpFbJG09?P{WfoV-alDg8K44KVn-Jx$Syqr|*V(|ZXOd=!1gMnqT>Wr6yVCkWlhHv_Y3%ahX1T|ZMfvxB;MG zm?_XDH^p9@8n*w@|5D6}X}k7w5yAz{zxWaLOFwe1m<)Y5mu=G+4|VV@^&Q$%0$}81 zI=7-HLU;H8nH#p7{L_A{a-xmK^tB)!;g6$sW9Bz{ zGUk3%_Wne=u@K|ExiTs^`FyEj!mYtKfrMqteMSn88=uqB&2EDCxstkTUSIs$!vVay zJsW<`vRib;*JjFp1NH!F_D#+n|f(bY_lHp%?_BQug?%pTx$@|^1jD6D)7%g zl4cr7Bfp4AU~Nf7aqxdhVp8e_6CHzPjZJ0_(#RVsqPli;_yosAm*i-Cj^BzeEjy*p_Aft`2a~P#0+q`v-PXA;l2)Ac+A4o z9n>Yf4=T>n$SK{%Y(F8N^sty(EctWmfAdRYWTkX<8yRG@5V{OXIyLtHZ0^^64j7}K zkGD%K!|^MVI{)WJpT*yIcRU(OD)Of}4H^L-@7qyB-vpJ>woAt<8!sJL6F2QOT3?rf zjfcHYdRW^zK&Tt?6C*Kshw5`f#mcW{53URNmH0+~EwN`*gq~?@U2c&6Z)(_T1l~Wz zz%|DDG{JVq@&Yd0=NO~&k@Ej6N*^PQlnqY%xX63LGWaIpf(ZxEO`d{67koE<=Pv7E zmANmUI9do~plQ4&#CO*0XD~l0)LTze7tM}esGsjs zu79j6*28o^Jjib+0cLTEf3V3$x$mt`%kEhWroPwLd7Ju8&A26D;$#@I>+>$FL(Wv zKi$zj1J+GjHHYj5ZU!8Ge^4oXel+S}Ro(Jf9vA`!0ylSh3a;gPPZ@k1`)`8s%&!--z60kuqJ*(h-83Wxzw&8G4~xq-)cF}tG+Z`erjtV6?na>v_3-)bXU z!b0q~#Q=@=V0qi&jV|%|#?Pf}20bl#K!bGEcV`FJhx!UBK5?9c9)Ih_r;cwViU;n! 
zmQfxu&IhOKLYAa0fY|+|%tG$8@5L#1(7|a;OE54}V^N40jI1A>?T)$)`bLfSV4F8; zoCSNAyLMZe<@iq@g3)QemYZQis?$C1o1|Lm%}_5B-t9HL3^0Au&3vYDbX;0e)4bghzbyPv(K;dlZcg8$l<5`!Xk|_7^9APg%TpcBTu)H(oa{ zWY;%QJ!)cm^!a@2Xl2;}jKqPFoO;0%w<7&_3)%dKlg0Aqe}2}M$(7{>n;bFYi$<@~ zm?`_5k)X;+*Y}`GBD4Zic%HW1(_sqz&-b*f7ALcxJHPiIoPx4y*x^Nb=w-ljLpNl* z*7cytVyZ=M5D5xr_OdnjRDg~9=E$*b(`ZiE_8+IkcAx=g)9)TC=(V+M>*-GStH@#R}P!!@x$`{?w|2FDBe;cefSCxBcsm3|@pA zP4AhPs!KHGO#sC{KbCUvPpoh1nBl)h8B*i8CYe}Dm~$v3X19U-4& z7QW$rY!b6xeg9c#H<+9{tUCRD%f0>$swb;CH!MuMb@9vETc#VKtpkZnVCtt^JuRof zi#)wq&Au>>^R?Q0%w-~%JTIryftufA>sBmIWFvayID9t9AZs$1QTusFWEBa1Xl&r^ z+eX=>@v6>4x!?Qs%G3$#Pf>&@PjSONhyU~>o1gL?i-uY!<3o37* zfK~K`oeaVTXC-W7Cr$6dYIB;V>$+sRw!nySQez=@dwz;Lh)nl&p;#$3uu-*xF?{gy zOyeP{Gbdt(QfHI_6ymsUyRH^4y)4az_{!B$Nrp?H9{yt67>=47yls>%$JsTzByQ*O z<0pP4_%SyavpH_-O4v(>3~dm`oE#e!InWB=q04lfNt=Ed>t(9rH|j=Ps~n@pQa2xV z@@EcF1X>BSTy(90e>?Tf#Vq3Xy|A;zQCo6+uZHxKmU-_~u;b&>EAr_mQ67aP2kNV4 zm*9F$D=f4hcr5U30pT1Iant=LH%X2P1k^IF{)bkbzWkd{%6wRM_nS9G#o7GV!PsIujH@)I$sZe--Jq>lhpQ3JX@XXzs5X2io}d60XN|>-^t}c z>u!Ak-`Cf@mQhPuIqDb(Jv?gMIpDnh!Peq`di1>CDDsozLMqFvvr(Sc-Von#3(+dGa`RC{V?xt>4^$@Igas_2w>Pb4?c9Rz+V^% z6W{lvD>U2^_kP*1XgcnEkad10WT9P-5Au1gF0*&$S>!i0xTLhm>xz1o{C{qf(t}9D zY1@7@f9sAwaiJoXr9x!7oEy5`IKfuI#cC}22E5g-<$6-fdUUP%d?Ws`fBS8d!IfY5 zHIf^{{IqvQB-XRaV8Sm;ksKur**vMjpIP=UFP42)tG_jOGag z9{{tn~d(g`I)_|ZMLV= z_jb3dw{A^uLRMcNJVD1@z&oDTg!mL3L?kLo*^T=!-=-aJ&~g1*U|DbU=X{B4L;VdQ9*?Ut z5)o6Sf=dM>LE~WwCMGywCO6-?F12d>-Piv(Bzk6^;QrAYUo$x%y+@bUjjo;g;GIl% zN|@7S4Ywf^JaxqXexAfMu5Y7jpIiK?)^SGY4IBQ@7prDlwV#aT4~}m-0fCJ>Hvjs$ za`%&g$0`Z9kAnR53zb{DCXh|{B}53*Lz4hWKp{QM;9_6peqYNw&Hr@iHpzU(|Jrdy z`m~5SOTa|*x9?|~UL9NU*@@R8{=N$ru{-$ebKjra`)M+zJtzEtCz_n8^2uQ>-{pX* z(yH8{Q#C-d{rRQie&1`>Uv7p@+um;nsK8XL+FPyX=^RE%hb%d){eWMFoGe zH~{-SuWJ5?>34v39jk}&!G`;wDQnS=5l9@^+MU>4592y{p~jL)S+%2smaGv?xs~Fy zV1S|VBf-wL<)>K<6Fk8Om6)GxbY>3@fs+4y{)fQ-5cnSg{|_T@K6RM|f8;_WN9Xly zFF}3t|D8`5%!#Cd3jdt?-*?FWKCI&3H2?4C-T%klOx3)s0{{PiClwQs6qo-0c_?kV znXq%CwsfE3Wpo}5<4qU=ixQP$II%u~Fa`2uOx~9SS)aH4R)xKY83YkB;r7)LhnTt2 z(zTSkCSfPESAK>N28FE;s(NX>a`Q}%k3T$3Xxt5g)aP`Nr?hm`wBZmoWSd^*6tzW2h=5iy5vL6n$74eTkiw1R?XWPP>Q!P0 z>o~Bp{^0a&2uPdM8rmmaIW>D%K=Dh5sW>wi(pilMsg@)wl`u)Z+1V20W0ih~{+8vR z!_A=XA>oN62qw@KhS2^w;=~67VFBy%KRy3Atn$MktU_b4GiLi>Zv1{in&;0_Al^EO z2%DZbzc{RWiFof~H_#mr1)FwIKA9watx#LB3PDM5d8n~bOf{hMivT->)+^V-Sq7Ib z_#296?@|E%yHYWR+MZtv5;k2jL51EaE^@SAg4O+LaO}QX&cb7{6bNbv z-^700O-$B1zyTb*U)dF-Qu7O8$roTyIm4tA%#}iw>1tioSD=! zd>q&g@LoMaI2$@SX_iBW^TJEM)o~B)&CT`+4MDQeY2|IkS-r#t0~Qd}LB!fQd`Zjy zt`#=|tSaITBx_?O`c%oXA5`rfB$<8F*70@)Pv8C5cy*znzXXGY5q}F~L6yamNA-|d zKsA#W~#7+4W*O!f8So6oRIT`c>|vIXz3y zO*Y$`2f(|m0`~y*hGg;aPH%|Ii=h2iM^;4CP`PqlMw)&(f&v<0y&8ItE2??V7qCkG z@=Ic;IF5lq=%DiO9#~bi5IBvkrM_!eDgAY}1l9bP%`_RxgNM`#5Y9p)he(A)974DE z$DEk#u9wt+8Zk*B0js82Om^r0=}$JWQ$;ASUV-eOv}RLY>j$H%giIOy2pR z46PLF^-rWk&`N4ylrcj*Lhkf!SqO+|>aU#4-RFfpgl8?}Prih!L)e>2Ziur#5*`Oa zKOU983jr4d)D+KY>JRtN6j`BP$F*S&?-eYMWx%BiqbTe9^TWZ;!_xC-ty)vZl?@CW ziML{C0YuFC1UEX0>Sr~)JU~Y%`Om-Of zv>YCdjP#P&5A~`Usas0n=R^%=PSWia5-w22!k)Jasg`B%sRA<_;W}!1kvfZ6V}@s~ zQaDe3cI#(?>75X^WT3w*&-9r>8%(kwyufNm@zp115ObfFU$@BL+^EK!w#(pRiA_>s zYagV4*3BK&OPkwqAxg_5H$&Wp4C8le&pdZu6Y8Y0*(l8HD#y(C>1NqLnKJL6^D zcJqSey({6KSTO-3;TLdV2TLkSWar_`9kFXkp4f(+ENQ2Qv=lgwK(VsW8+>_1_M#z4 zc9+$vZ6^TCG(};<8&zE&(XZ~ok)Kl0^90if0*@OPcFM9W)fdY8e4~h;vRnee62ql& zeyxW;N(;dd1&;bQfP)F%nD7Ku<_TyT!o+ z`3RA=xCY6@n4~w-Bl`oTu1)r13E75c}iyv0ZRyV!IwB|a>$q|6zZ@G^b8&@j z_$&z94w5}+e77us@V2RVsqNj(k@=Ae-hs;D! 
zYQ!XwwKkt>AG|(guOip+aD!R!9KV70$X44fSvozi%gd+xfZQNU;B7d{xk~s(?7>u5f%qWJ{tE;WB&tmGGtkB9 zPSTNeTLp4MvoN_l&>I9_xz+$8DMxW7o~cU)hxx#>Q2sBnM!51J85bun^&jaK&wR1* z)dv(|4{}Yr5Fji2abtu33>E0dYt`MLz@q?ovj%0#_^>O6kMD6M-xp#55qxYT?~Ps2 z;4VToA6UIL{Z0lc(5u756YUMn2+ncXfIQo_9SeN(B!F= z-WO9o7G@}k+~l*2snrAH`7P5a$uRyp0il ze;eQM-efjp7rD3&88LZp&fErDdWHOfluh8%Oc*+SLM8OK@9xDUSywN|e4kRDtAhLr+2eAPur)7~U5q91yZJN%$a?ygaSFWRD1Cxpo^|^D zBu|U5xsw$UM6KH1{H>Vbqz#F!?`|Un`}^ZN;e&7|%b$Xf0`CA%+vmFUmvfiE@QH4-a1{>MCJ<|{NR<0GD} zVJLh!aUC2MvjQcHRjon^VLfJ_p$R@#LL|rydP9Y^ zjn0=SF-=0~??yuqrsE^+Cz|MdX3aXz;z%4R5n3&u54~1*qZKxJ_c3XhIuCv;sc4Gl zrQH-}Y=!M*W^3A@qT!Ir0gF)4@(bz=DO-L|@rTsmqp*(@YyrjQx^1 zK?K77o}Dszia=sw=dTb46wUDc_QTjui9$GR%dcD4OD@qE+&WVs4v6Gu_m7(G&%THO z*NQJbyl6NT8YVR`P4B%Zs^E2XTnSU~nau*EVrBjq2&Dtrwpz%PUEHe1tg$hHhT|qBp%bS zPCu7Q`Xke!TMEMt<2DTpCAyRS2Hlp}wMqH@l^~MJT%@{$K`M(I7b)VDO^CyRpOR7< zmG>kN`aYLYnM7Fy$!`20VkB{{|yU+_ga8rz*sCrV}}c zFV#5{-F9$~yE(r%wyQB0fO4JZu_lwlHArl_{l`@dIhrgHs*5Qx&c(|7`0Z9)+(6^| zD~CxA68Q9W`CCoxw+b#9oUA&54p+nOvFR`jU1ppfx=fO{0v+#CbUtRpG&giyhWz51 zS4&EvIOIlx-c!0^ML&TRu+_V2YTv_oWpP2ec~|>n!5+&K*XbW{r-Y@lPXFxY4R8E+ zo>W8$o(}0>zWvhwvhZ+R?Z~hs6_(3pa?j8JvLhZTvsLAr-9!OsLFV z_oOa}3JwG(H1ETe(02#E#i>S)zhrLFsaOziR`(E0m#dV86(6|8X&5AUghprltBwc* z^*ka@FA;sa{HGU`w~qpmWtPWJyb$Q^sCBHV5m0u(cx*T4d2m@Q1Xj-My133y3fyF~ zja9h8y-hNh1G#2)d_`MMijKnM9*cK~w#<}-LM!1?@+0XeZ9i1?~p+Pe>d zu>8d9WuOQ>i|w>gCFz=!1HONI)fm1^o+m2gSEZQTI7tAQQh4PUAM!gViE`fT2jW!M z8^-ov9=4nV@cLIoe8`Pl5E;CB(*X<=ID7pzHZa>--?Cy=rla#^B5IZpE&Iha#+z{U zdzx)l1ZG%C_vAs;7Zl|B^ldk`G*2-OU&WYH&ADC!hc+F@4MagXbPc(oIIQvv=l*|_ ziXrES;2$57MAyKWZi`@7<46A}|BoE-=`q>@S8P@Pe z`^Ls*Y# znJnWqw~DkG*hzM*2(Xg^Nyx%C^!28u1W!l8?^3oSQjy z`8vN9S%y>GPio7e%R z#+-P08JM+dumO+%4W;sMBkW~P{hH}$6nbExPe2_p_9wLr!wv;qecAGrGKu#DCnmA_RW_Pn6DO&tlTe5(hxXlkoVn0wOJ#K_>gS_Kvk4&ga9%O&)}G+6E*_f5d` zFE6=SGJnUJzLv%b)$eL)A&>98{0+@2n_HXhUN>MxTmFi|yy`Wm8>5GpD=R6XzZK@) zVVzF*-Tm?GJ%brDoINv%I%A7j_y;RrX&tGlDl0QM!Pryr#4f5Ot;vfht4hZh+vcry z7*-t)<_pQcMQ+6BhZo!)R>XUBOPxe}^QOnupRmx?op$GMqQEEw4K;j_0d?znLhvX+ z*Exj+5TmFszzu@S!sRZ7?5-l0==i|CTZxLo9sFr~`}%f3K%FWy%f*hcf9u|@Rfhfb zow&2?*)@7B0vK)PWV7Sna2@jrsXwf^@eIPLE0Ab=4r}^jj|X^GU?Bs4u6V0L)VhFr z==pH%Ny`yHGGvqtgcUuoIj-eyJtLkIAz^p{(1`G~hI{n{kD)}%9X&!|$X&c^>*0gQ z&?p6&iOO0Vn-&@SUaj37qjV$Jsi#Dp~!+cKYr}6~d6job- z4TzWb=gr8+8+5Uw(I?nRRYN z>gH$X$MMBSZ~x-+3oUF!*x4jmVe>Gy=it$Hq%Sd}fr;MV+sZ<@(lk&kmYQ+=a$S&z zNh({hEN)|ge!2igop=+`#&~;^;W_gfF>@7TWagD48FkBX6-ZJOnB*z#`74)tiFg?- zS7CU82=`X2KLK~p^s4UouFJx_V@K zhVqM>_##pa#v$xy4-8~DVl1w;q7575iw)o4b8WmFE?Q@bOP{_@W(fbTPi@-Co4jgFXjJ|0O=le zMqVq!@)Od~a>cmhpBin#&wsm&li;F-V{lQ=TU+(iK7^LH!q%P&?N1xiSAk&Nh`EX2 zaoxD`Q|q|ZJ=(vv)3Y34q8JZz074Sq3~l}PEv1=)9V2)`_% z`qlwniHTs1OTMi9B8wc}|9vM(|koc9`r>P)SPRl;`8+h12>-Yv>}3+AqUrKJlaCmwTvc zPNY6XrxWheLKnSBw7VT!n?E0nnevpB!!Ka%+`zq800ua6yG#=cnJ8R{Vi#a;{EdiA zZxgtE`?r54A+=N9=ki~nM&s(xk`*#M)?03yjGfb;;6wL1h>fy%V8Xy*H$PTz{c8)9 zhVL|{?L^83Zppm3Pe|?VER{x;Q-tA$d;#)1$&|Bn3dL9z^!=GnPY8wLq!9(NuIzUAn=Rzh!wc+X}k>JMXYiV_UDAyn-JNg8f(+C0@flc%5aVEm+wFmL3M~f zgiqQleh>m~M!-$C`jT*Csxy>q;sd|5pan_4O}Qsx>tWZwGt}a3)A?L!g6Vl zV)|iuG;YVKM;*>MO)S($MWo1x>{O``*^tSY|LQL8wAz6m*nbqM+q%;z(oa+t@$+~m zcGVG24MQhfgK6PA*=beB$FH8|gr^o)XO6?=11PoqnSfT(Vh-WX(5v^Cw-cv{%dm^t zpoM6J4^e0AoYwNdqSzXV3W9zO*IdAEF)E`)S6Zq`*-xe~mSaxA>4Z#_K(xI<*eYLN#2Fvw$U zH~iR}_ZV;S*K(ck>4FF@Yl`qrKO(zE{LCl}zFv^xywx`_=bRWkUM$3LGR>Khq6`uj z@fSw;)AcBoiq=UpmQWAa<3U_$&2wnBt>7wCBC!@75P7AX5N?}I1VeL;7(&}C;SEo( z0bjJx4R1cue*)KZx26jFMr`Y$v3BfmbEo5BvdF({A{L7A75||qk52gClEUGTV^_?r z&xub;u7WZ|?Gqz>#=9ny7hB>>GizE~ImX)7xvqW972iula#>{`b}~b0+}K{3Q~#0% 
zqJdy@q4nLY#MV@N*(^m3Kxy3MCWVg}>y;kQS9Uu4GP=uK94#~xh1a-~=b$|p-v`{B zRez15yD?huZf*Bt*$Zy&ytUcYiLv_Y*XX|2 z)vY;?lPVyuJv?tGoz@+aYnJXg*||vxf3nQZ6!{1CHm2uwcP;T+IwR~?*H}BMaN)-_ zSuZA|DSA=`30DKTL;3yR@H!waEUb%7ta-1JZ#3Ywe@!zb6pqpVY447@3+-3#ZR%+0 z@*jLN%>3{VrWLmAhQS5*3_68Ly|7#KP)8!#3}4dj#a-V{<{zvR z#B#W>Wve$_6$Wvk@1k>#e<6&|@#KziJ4yd42W7l*jc&WYg-c#tA6!z}G1&=gQ-RH(J^kWWi0#lB9Lw|7O!M{Fi_S4_g8K#?aVW*X#= z?+>dsFY%*4ZM8oxiP{$UAaihKAs*Hd$Jh9h-&yGh+Ng&SYMX*ZA63kg;2i`QL0t5^ zeAx>f8Lk?yE8qo!Vcm6A_sSlA{g18W1#g;O$##25;pBhZ#j$`Ol@xjR@yn%5aI3pQ z5N|NZP|e=;t@X5|i04vZ8Zb#rBkj;-&c&1r3yYsIl&p{_TsG#96CB=Z&(T4`d$uov zG_r@s$2_&lO{CiM-PV{L2sO(ddgAb*5BPWq5fC%73-YR4mue&@D7{3M;d^M)OSH|E zF{~Jj(Sqp<`_8{gxgc;^UeU|+4sHc+QU-Tda1#*R_5qZ-DMScaT#IyB&1)=27h4D| z&1T(4;Hk9($pLfWv}Zww{8r;ie>n_ybTP({!*oCVFPsNhRTasTWu&zyD5*`b7B$7AVx z*MZOX1ogtFHm1K(hcf;U%QH!M80Yk9nu@G|i9u7N~ z_>h=+hRFSzM%pg^z7<`>XkQV(7G8N-gyUn_#f!q|!`ECY>}`~jcTxnl|D6mP0#h5W zY>?YGQmSZGw_b&4EED|lKN4(K&DR_0XYbF!pOhn zhj4Q~%%0eo^2>coJWHe5ZEgD25(;=pj4eI#3d9bNJeEjxomzvI2Q6Iq^FpTmJT>p# zcH1x{3ZH+rQqh_ZA#A?08I^}LoZEJOtuBR+rOOMv47&Xe@y>2;_XC?9xMgr9SEg3} zzClsrU42&i61yi| zCRw$_i>eDvDL%0YgfwjqT+@Gf*@ZbpaDIpfULSJtJoHXwHVdP6oEiXEe69qazvx(2 zgfxuLMlL&UBYkBs_)IY&HvWN=4YUu93CKljCVO}$9YUhttMkUAb)4d~pIFc^^ z%5vxJpoGhUT6tL{-E%`2?hyx~jloq82Setfgx+H3=cs(QW3cgFk^EOXE811}?yVxG zqZP5yDXZhG({hbl)?|*Cb2L6u`RLLV+7Deu5Cwk+i@n5XVP{kx*g(KCC-UsJ!ts{c z)?OwrO6s;Bt;oMp{Jl+w&3!K_Gn_}OnuNX*tfz}jx`OV@$JTq>Tq7|hA*%ea1BXq8 zcp}%)sb?G&g!f@KKA##-k{3o^NulcOQ;M;lbwC1n&p5QAnQ#R>4tjjRbm;mqo4p7; zPdnAVI`IOfRg?4!r3 zQ#4i0E=Z$Or|*oXT=?}-dB?Ll=m*ECpd%Wm`w|zfs z|3s9)Q|8?Tw~nmM^slCixV!xX|qCwqOcQp4kr@=@REwlr>U2T%3g5uPcws> zxA@8AO)a-KWC8JFadE}i;FVm@L#~B+s$64Kz$y2A*fM)4W*is)ItG{ei-18`sv|32 zF*p4Wl1$`UuQM&?gWJYfBo2%3UeM-0_?A*M&`K=R#WVVLe35q*mbcVuei6%fwm&R> z!N!3u&f2Yd0MYFvdTYe+O`ooA*RYgghvpJ}S-amFh?Rj@Ch6ES=CfdKcj~L9-vHaew8;9&W7RXx2VEB=PpsjKH;T{ea*78S%__$ON?7i zPX=yDlTiG-tTR*$!H~&taoksmvwJPcm**LLZ`dnrZ@G^T;XMpWr+9>?f|EhrNw**tFDLL)u={Y z9iy~xS>&XI?~5Jy@v}nG!6#b;ooZ&89dPCOlQi&H8vJr8vpy+b4psjX!TgKJX3u zJtAnl71vEi_-ib1G8yj;6M~2Z{|ua};;R($wWhGn%kQxYKf1by?Z&3t2AOvz2ma|* zQM$@gsWsfFBNtn(WX*N4e55s9L#3gWziyjj#z3kyk8Vl!&$nt;QKkDBI_fvr&?_@a zvHp?q?cT&s&cF#G`ZlsWf}kuC$Z{=O)@PI~z61W?_%)ar-k@K#{(iOlFqSkm{ zR5Ixpr_ji3=0nX`g&at)Bxt-gcFC%BWRjgY;8d09HQdbwh=^b|^9r-bNrYEzFY&L!7u2OSsdm$U9RmZsSHqXn*Y)~iS`2m3s<05&qC1anf>T6rNQ7H@ z=NGBpXA3rh5hBpeq)&OfI9%4Hp@W^rMekSQRSb`%zU3z#kKih5#`Kam?d?Y>N5O8`-ssAs z@G9TWz=SVs40m`}L^{~&bhKX-8cmZMBN7tl18se7v}>*_Kbl5b(*DWI4{-(IAumz+ zc#rr>O!HCg$HS1&NE~@y6mIqx0cyEa2VKLL-j^UnPgeINQt0U;IRJqzxR6I8sK4Z? 
zvQ;sSXWa`vB-z5%KlW4E%Ew|bL0(CJh!)roM9&}ZYQGr2v_Zyeq5qgd|Vw%LURgwbtRia4gP=r^AQ`d(FpP&4jlU*4~J zgMj+v$uUYCPCE0w*bgWn5AQ6J-GtY@^qBDr!vpjpE3oG#O9EShy#zrcT$4oGej=70 zr&EkQ`R=CvWSb_;d0xW%hV6-i=xZ7vUkUAX)iT!NkIVKLj3njPGwdxe~K$ctwgK;h$4DwH+%m!JyrC;NC3%2Y6lP#iYomvqTQviPS}31hWaJ zFY13<+tzDTCClr>5Tn(l1-TtkiaWH;1&xk@x}CS?4T{kwPI;CoQ3hf_B-gN_S;2g( zR$f%j-l6GZ&}2H~jIok}vt^r*W#)F97d-fVx^0n7DB21UcyoI&n3i2ih8#H0L=t${@0 zfBExxzg?|SpVd$)ysS9_qthjAe`vPN9Y-m{z0L4qQXqgPl{!PTp@|dGe^CVC$ejZK zeagn66dCfMXj(Oi!IoS#H>!Qq@CM2DR}E>2?JO=0LlrN_7r*QAUf}9PONjOzi?MgP zdnk3;5Yg)_?F$zela*ODee{|p@>Kb%K|qLIK1l^bD3-u z=E_~)s&Pn@^(gPV-0;K5NUpM5Rd2E=*S-_W1lYXbW;MtZtqh$9GEPS;UnUWj@ctX~zx@VZy!_k8iGUEXUHaXQ-g0R>;$; zNZE)dDGpEBC3@UopFycWG*U zC4?b}zA&Y|8clzSoY0suCA^?6BRK0!;i&nzF&cuiN+a5yxyoP|Mc6qDc$^CD$B4q^ z?m_085~0umRe8!mIRk563iS$T;?@07mf6tz0rW zNal@Z-T)L3car?`=rHMK_Cgu9FkS4q4=4_DXcUy+U-Ko_iKeLW|jEG75!qfHhgl7uDeBqBq zg5vjc-zxuR4;$x{xSKdJ?7hr7!|juUIC|y=AGvBxqTPDaqa_NjJu?4!R-m2j(wbTA zjQkgM_WNRx|1AEg$SsnPy0tHxdz6xgD7+;GM>!bg-OAcl=0l7Vq%pDzIou;S_8v|} zk$y>9ArcB2e^F3V8AkaGr&#Ch7mqkVin%RhH}XuNKu!S z@ZL);d^&hq@n)!z;2|#sj&<>=c>vWfp&^TvAz`BYl$?gSfwLj;ZJkT6_=*sFMdmj~ zn5{{=Wdey@LqL~;&lxsEJEssAWi@6lN0 z(95SOG)XO|DFMzr-yPWvxPM{jBf4BH zUp&nP++}_~)KovL2oh~*jT~SR3U>(<0|GgO-6X{R)*`n25nw>qCGn|3HG6%9d;rbfU*Yo0J|(WJXS| zy~f6zAmz#hK16}#aB2+1IoIXcIZt$qy`@G;X^dBoXTyGaYyug6ii>HQ*&AKh`tQ{W? zpD%CUV6vMWGZR}Oy32X`qdVj5@adJ8LLSoS7O+l&E?PaAhijJg^52!erKfWo{^(|I zW$SV$T_HE47!mrEl_TAY!@axMl&iHbhIAFHA*sP0Z@wHi(?dx1Fi zh2`TP24b>WTs#bjWp|-R?7Eq$ydzzU$!AM%)O{i;X@3qeF{+@OSd>UW3G(n!;97jw zp!f)JtXui;l0Pgc@wnh*H)56D=ZQejG(EA&xTAL!rJr&Nb1?;s0Cm}hZ2lQ_@4HRu zrNkdCjj%`PWA2ZwC3n38mYB3#8!41Xx|3d?)gHJ_%sH~HM5oRu5xulH%ea?IzE$2t z^64a}OTQ_gKj&6@EBJHk3%Wh7qnb0EB|^aurxhAE&DmqfFRh+WwwiUvgG6uNcaN!Q zFyDN=LQx{1jH~N%-4E(v$w*4H+Lwmbgi>LvLi2|ChmqGBw-e>nr0#CX>1ls$WclDG z+~mObj<{yoLfV)uG*F=Za$Uyg!c`)H_NGf8+&q+jJUbwCGivrJ7d;u41cN+k6}5sB z$Z{dhmh6d&f*+q3rm-x{Ri44>h0By)jUP!2aX(-mZhqd)(A=5174vf) zZVXd==_SxmVK7S2()>x&tL^C`E=|MFbmH|jK197sF0>E!ae!_P)+I%^mUthcRYwY+w@=s}y{NB#v|PIpZi!7L$&jy~s;}Dw|2HksH*Vwf zCd-MfcP>tkC*$RHBkGmh*_m*6art|mrBNq@_7|zArXoG9)0>V2cE!YmqlNgy$Z)9t zi@djbi{p91M`xGC-67cG5}Y8x6A~b30t9yno-72{#Y52G4#6!z2(F83fZ!HbB)Gft z+t2r$i}MGZ=iHpTd3vhfuI`%Zndz>2`|2v6p1^05UI6i$jN^1618gqQ-+`1fO+yJW zm@5A}vxg0sQ_S*jr{W$+{S(ob5b)OuiA<&588VU9C6-oC9Gw7um}eGnOt_~@h6eDg z8SdU&XV+2eJ^mDuY_$VXhM_TBy{-C8XX2$t>O%REBvMYJJUQ3tU{A)&H5sGhC}tlS zwZy;HoO4Tuy)*|))rUS6EqyA5yCtNDaS__QuY*fXQD;b2TK`Oevd1`|x#Ob1FecR) z?dJ$fyb)0u6d}e<+h9u;&!9{Qy6sHH&<_6l=^M{Ehg5gX1{KSFvUI#HlITlUr+d?* zYh9_WCy$q~sp=I`iUTR#8EsANGtMzx3=Ed;s;Z(O+Q`4@#)z&JXjlL~>L11`PW5bh z{OnR9-3eKIhlksIXtwg0b%I^6nPc>(tK(}GBR{ofMLsrEDk%Q97|cU)boS1dy_whRhN*^S80dL1WR5yAiSgP7 zj9G4;qyI*K+7W_S&tA?kaPAobAH_JCV_KLwgh+7hy%4yb_&XlK(^|kGr1DzH*fjLO zuh$3TY%W7Fw931|VVSk=xgh62=64P$fc9hoivahYSa{?GGu`xym6xVBU`GLJPW%X- zNEaq%zA8C)ne-yL2+JQlx<(Jzl z1dGP+$~*SyKG?i>@XV%x8M9a?{eG`Is034K2>N6213KX-fZpKVfcjmGz>R$BL{5>Z zo}p-gPX*~5eqoYeL-BC~<2uKkpuR5Fn{W~Gu-B>H^v9i71yhJ0N+z=LUQ1R)&+u2= zTn>>TR^-zemW9+d@9!oLEyM(EyhJ7V(fBlHT%6DatJ|qPjMvB{maiLE-=Ztqu0C8W zxPKfx!ZaCIfho>zTTW++g;0Vd0=^|zGe0%M@<6fmiORHpcI=aA7sa{RMiy6@VJ&Ch zhjJmW2rsHCwxObiSSki}dTy%Bv7H|R<(RI>DBG{{MedFW>0p=qN|TfylYY06%i9Wd zFcOEtmt7`o$~B*=$XWmm-c%-M;|U*AhLy;aJW;p5^}5viP2EoWetsB}Osu;|gtIiD z3z)S1Lj@bZtdb?$yCw`ctL1~_cLXEKi5DM|mN1>D@V^8{*bzWdflGW*%Id+-@3jM6 z+p55L=ET+)jWP#BiC75D2Pm;K!T$^KaHn z<1oNP@-1MuT2;)Fv4FSa+I9I3{w%YPAVJ3bMiN_l8UhWom!>}W1|y6?2>JuQeuZKE zDi1gOSxlGEUrOII%{#w)F5Bl=IAHL*0Fq#p4xh&P=|H?OA%?`$AyH#-u60sUz8s>B+E`o)BmsyNyk@s> ze)v27p})Xh;0g_0;P*=lj17*lU-UF@DmO=rWC+mPy_Q94Rl$8l*KcW8R`9MRuw1Z) 
zd(~#8o`+3|RI%~2OI(ek8VPV1({yxn`~?&bZ)b|5=U-ltF<^^B3M(BcfOJG{+phoh z(IHWL(2Fnxw3!(|qUWH35>Vnmj+hG;UbHBR5(hwCu!#deUl>6EDzX&}z)xZs`4cY@ zAOP`XjFG>hP7lR5LW=?|LxBPbVT&iNKc!%(vOv*skbD*=fWVbSMSj%&_^)b9JOL{B zkQ>G9Nk|P2qOAwulnBk6FqAx;5>61%K`-`yiC@iYqq1Z@`-!Ob0?<%7QGK#tzFqDOD`?CwscP+}2%r7(xe68>wN`6uQ<2<$Q}%U*=Hu*-Md z_6eL_&GPHh#K()}6`&oj9qd6jkC*t`t%3d?pA5;OV19hJycNqA%Xnm zhd$W|(p_2%Y7gS@$1o=BOy+9p9pe?Joh9nLOu=(@^lFoBt~*ACznfU}B&!{LH~I4P z5ZzqSAcg?54svJ-TQRb}V0|+E?C!mo>MO^|XEJtaP$KfTq*mX%0`NOV>`bouH$>~? zn$Gc-h4Ht2{UXzn>NM+kPYxPd=m@UAzj(4p7BNN_LNNEk>H@=)xpg$3v$mCqb#QQD zg$NgG+|(;+7IoLNYRhNpmRqu*PAB-4-hY;A5xO7b{MYb1V;@JPNFY_io4s|^QMF{B zocG=IuB%v;6;)Vw8G7oP!%hRsS;Bnx?c!;|yzzJUKDxZFfIQhzW&F^%nnJ^BJ$dz# zACzODxbGxZj`dvul=0bv`_O(GBVL!;95!H)zkYhpnvQ|NoxuB5ADakZ(wm* zuF>YT6L&GoRrKRA&v1QBJumUw$8hg=h7(CX#>?e$n+8 z3Wo+SC2qlZbJx&d%uZ@gwz1%u1{HTCbR!{`-hf|De0l0;l@0Ibh?VO(LXF(6#*{2V zL{vUKD{UM;BC?&XW1V3_oYwhekWD}Rxf%CxelY|YF2Ig=y;(y*xws9w3d(q2r=}GD zPA`QLqOMX0rrd(b>BeB1cCyb|I2y!nMZI}sy$}XrS|w46`-kmzc}oH9V`{IH-%@N> zAyDfzFHi%{Fu4Ls=`6R<-AGig`E@IXhqry$;R-{U9O(RhYhlgtX7eJ-AifKv7-s3o z(yuK}3%I8^rD);`Zr04_-B`SfK?IYw5sNFOU(Bo^zCEA#Yd6+b#?D3>Y0_mQw8Oa7 zZ><@bWjrV57KY{>q-`078eyz1iNJClrF``i4{Xu>2zRZU;*vh`0+R{#%V=DvLSy71l1 zm6zkRC_{>_{kTTT-L2dn?Dxs$7A%Kj^?Jis;zlIB=S8e=@aK+@zo`AiApzbQ=q zI6=Dg^W#T?*mWeXec))YdpERo~HzV?ZSoD9!7ys6~0QI%GmMC z%*vDle!-Hy|7muzUguuRhn5hw&DFj7`V|kVNdlGJC}_Fo?~W|Sf{4GDy@9Avd42N% zV)@Sl8w=EO^~FojMNA9O{|;VoXW1^rL4e$i{(rT&PO;Ww4#sG)cKqhC~rMgk28I?_g_ zqUH`&r;{Z5R4Q}~1OrMNgN%{+lSP>PC&(Pi!Zcm3cRJVO=$kTN+5d(<@{*9t_RkEa zI5P(rHP%$@?h=W&_3Rh-VeYV3vM=g(rY48x$^nu+4_QIZpusssWJJweC_zWntYr0FEaM2#cc@u+#n{ zB^eI@xIAtye5UGffmi~_+rpItb%O;#Kvs}Qmf9SAH1c+#nkr+n*pXWITfpw5yR_Xx zJsav)TV>0X2oJb7`?0Xj%2aB}@_u2nUWx)M2WuB(QrTALNGuBkAg5YZn+ZW=PCk6s z<&FjrBH5$E<=3RB;M0(&U=Z2!)7JdT|GU^|qJ6iZF8sgZ6>>B%)&(~8toUY#oA*oL zH587tDW*SCzD5wqqL?zosD4!V&+P852L=LPO^G42k?IjZUF{D;e5{-=M1ue97brlG zDiI@@BBKR({OiYybJt7GOENG=mwJkr-1fvo-u6=kbvfYaudAG!S{{B+MDs}iW!!3lKB+kZ7zy)Jdag60ziPwh)FAXQw!kJ~e~ zvRHfmAK15-zP#~b(4Ys~Z>ZX`uP^sff4|$i^sJ-IVFq@)+Oa4 zE~)(3Q9rq_3o1??4$}fa+!(I*Mn>^axbSy3^6U3JIRzDYuX7BhY7{vpeC^N;Jls*VDzg0MmH#1NW`_1u+JZCZ)*^1@>I)s6V*gVyfEIdri95%K zJ0qGlz8w5G=Z$C1*&VXXD#?2v{VkkX92PM@RP={Uno-KS9~8DC$K98#nqs}rCzZ!7 z9Rk+>svBn>+7C`aKbsg8?l6}?;h(bh&67tz$Lo4exH|rO9N}j*Y>$7$rwBM6FD$;_! 
zo9Sb6fqi*K+V^)zy9TZ-xil~(yq9;nNAeZb;heWO8>c%dcnYpRvv3?-X|*z04zd1JNSfFlnFir|=*9B0f!D zn$*4OQ^&*VSHSeEM~rsJIT+bt;Fol!PP`L86PLm@;fos`3dAM6g+Rk~QS*SI-Da|K zrbj{9^weyZCKId&%(vKK1G&8y2OSy05496NOevk%bv>NAL zO$1KU%y4Fx;ZP!q)hA3`H9D2n3@IL4+O#v!@O*YzoxsS(S5R>#^h@(*^4YWq2@LYDQ(VO?F`qk>Bd zV3tvh*$F;Q|Bd+}*XUD*4k_a3#Ziy=bkz->7R~giOTy62L}w?ZIW0aKsD8mk)#Yi| z@-nr={!_(WY}2DH!%{U0gy`)W%}dG!Ca=_~slq|EVTofdAxRx_Hn;%kuhSjKz5c8` zlh1oKd(>mBLMZI}QqBh>aKzmAEM>pfu#cH7h~d?uJNAcH4-L%2uNNkAD_G&GW9yar zbeF+*`!O%F`PQCOw`Z(o;2F?p{c&^9KN`8&EDeP4kKAVJuxpt$c|3gctsuu>=I&C| zrRRN2n%i%m$pSz{J!`H)GO%e1ZDItQ-l4%ql!VNzpfmveDN~X_1u%foQf{vL$k|uHHInooakgFY$G?aXDQCR^R$Z{_Mk% z`A#ly%GFhPVdLigh+|FsTbauC6m{!Deh&f5#*Iz&LChr?9V<#iji`0G8ji)EPdtxf z()ncrmrW?D9cnUuSFGa5y!e)BPvlV&B=!`(aLhiO)15(aZVKMY^-6QRbeVru_u|8V z`Tl4&8D^i1PHh{2qgH-^h8wbsg1Km7m0nQJ6ONUHke&LlO)shK&c^}HF$L8AosIO< z1*dt~ri&cEo$E>NmL*ETaRq*pa^5nOnvtmFT2xPqFO%Ba_>S$ITR*DTiR{ZCuk*78Qdb(ieS`62azScz_{MPp}gWWQO%m+QMMlUdK=1Us#CbZ z-e)NX?)OOvy{LI$xX+U^nIpXVpG48<&9PQc(wbu|I#lmzcZKf zE=|V;V%d+A$Lk7C(p8S>B9J3W+t@g(xUJ|jtyOy?t@p2iYaMH%3jHS?J~reL=ku{I zx3YdgGV+Pj?ZvtsY7%g z#?6SAL+9FY8vG}M;JGT>$e`0`;i2}y3m^lt=QCT$~Te3neInr(IOZ?sK`Cfx4uJrJr*<)~?4w1$6#Xow#?G zDoQvxP@6NG%zn*?OA(&zh$J=x8DV?-e{?UEVGACc!gwW zxqs7Y1K~TCR_3kGJOM?P+xXKmm%Y~|Gu`u5#ih{z&ctyb;c$${!$-##pml3e@jAR_ zhef*|zRTG!WFkV_7qf0W_cU*x-v*bztDLu=i6wAP#@31QUgp+QBjAF&WN8ZI1Us5! zuofNuoPFCRoNqXhurR)I-(r7wYi8@k3R*Y$8_m-dmg2uN^<;n5Y)WT-v9}erQ{!zf z@h1E+`fYo(zuoV?0&@Sinm{9~Q|Hfr#-hqhbrv|#8SerC)6rVi;eo1O+$b&=N-mQL z52sK$qo>$V8Ql$E(}&v=D&Ztj*@x%Pw*?z{%@+L%I!yScfZIv^R@NTmS8%^u!G~*u zkF6g3HJ=qh>+BZ6ie&+uOg5>p7;iXwge^%BxZYo9B9*`#+(A{+{1S`Q!oMWd3= zfsq`MsVs>*_ip0$57}u>e8IA>LF;W1a$hplK5fQ=x0ra7YxPsc;sYfheW_2`j3?-J zkC|#Au_hSXDI_hJlH&Y5kXRaV8=c|AHC{ooi#*xXo_5@m*HpzE@I@ZzU;U45RTB%( za{;A__P4S0BgcndxY=GFx%82JtJua|n`3(1!OpRg6b*ZP8%b%P0_e5UCj<`bynFf~ zH+h~S#Bwm>^~V%mPL*=BGpk>J6KNI1ql$ywZydsuWZ0Se4{`Sr%%mBPBSSJ{|k zvW-lBrW^gsr@L&K<`yuOFECY62XC4~EmB#(n^I=~QEx>25Dy!Ck9j;)ZNU1tAJWQV z-ru1x-lQT0TF+8Zc+8kk-q<81;DATz3c938Yw_dj3`$X~q*a*R=%@qNx61y_o`fDJ z8%ZRx+CN-p!aZO3roHe-!HlWdzB876$B0!PpX zW*I8;uO~Au_?ae^?C?85tU32Nf!Q+w6qeiGWZ7#?LgsjE?>Do}pV?o4%dnxl?oP>_ z=Xu)p+aBQALDp+rHhknguGC;$OrtHTk297sjR(nI;C^L&g}ao;HkE(X2|#t1dL@OG z%FYCyhMhH@W(>q{>4$A$8sO|lDbch-U+@ISMG`dWEb$Bd4Vnk#?-Kb?UaX@TGRa1* zcfedBXM0?0E2U;)IJn>V6ODJGT-pnz{SIH3S9AWuZBTaZ>g z^H0_{Xjh>w%xIYDae$kby$}};^zfRrdX#|DdTmz&+&^--GczQ$@GR&7izaP;r0m+R zYrmKkzW2(xi()0KK*ZHg3=}!4qV@z)H-QQO;U0dFlx)e(1x{M)4ek>BU9E?zLLjm3 z7$a&y=}H98R({{-FW>HYYtH*_u&%7(f1q+FRxM_M?*$Rdi!PRb7lDdr0Cj&}bD~|j zcUS*O%gmte&;I2Y^2x506J9P_eHJkHZKP#^f<)Hy`dM`E?8U-u{Z5F^;lNp?jfAURJOS02%a0cte%nuW=}MX58dFmUKaRbiziNJrucwD zTk4VFUpsCjsR(vCGkmXBtc2`#F41~7h7Eq>{9;V*-!gGTVt^C>UZsJuh0qQTboZO< zuk4@nN2iKEV*&ZmZfvo-T9LlF(yTHgai#FyYUPHnKpNc_cCo*--5Ls{vaLxuxqC7A z;coe{V&f_oGb7`(kxk%!g8dEmDB+XqbYB4EgG-nD$K%4hll&;Ra_v2_tluv?mRv$S z5LpCU!@TIM#H+LclsWjUtw5IP+1GXDY?7C=_qe7N+w!9(stsV{Teb@VY&Vv8>32u3 z*lRIR^^!;syjwGcbsw}SWZkwF^!#dc>ld1VER7jydXE+7z)cV;$WBSu6dzQxZ;#!i zRMMD-okPFXhz(+i_|+~_E1XviR1ZNX+WMQsac3Mh!BpQ>XpLK-Lxo88`34D=g>SFA zU)w0=!k&R#j!_RG6fV;+cENMKsH>;zad%6Bh`cFkW#L`)-OD|QHCMoylQO-2!U)d= z5g`~1t3ZaL2lVZxQpkTVAKK9Q(;vNsTUexhgvB-pO59$>FQX+94#0u07_xvrYz~jg z*K``qSe(0=0#P23X53n=33IWaaSC&hFy|!B;50H11bWFsfTgWKKSw)zc~_8SpF5%F z8m*l9pw}q4pFUU3X*;ooe+j0iS{!<0#zYR`SEyMBGm2AzIi6HS@l$CU2$kYOcduOA z{jJI4U;`K2a10bD?d|m3iNEJW2)Yuq-N4Nk#QiO2l?y%Y1@IbemZ9ts2~LQ2kDvf2XmLX9(#9#uu4d#@D;zxW*B@M9O(l$cQWB!CoB6~)!W2U?$5=g~e9 zO>UXZuiKcUly#$CpTXRaG_@1cPz2^W^bYfEv*u$>Y!0fT@%RQxYWa$)BOqnC$dWIZ z-1yF<5dI2-17%>sSn6RTt~oYtUU2@eazQl?)cWP+sg4^=wAe1^ zk! 
zt)3C&p`*e>)_vprc%Uj4bS9g|&q0N-RSDY^f!*tL6greT^a~eRPcbya7|mT%4SY}! z5f7oB>Ki@5?RsA_I|7M4?DSgYY2$8X>v=GA99VMUOC02eXCPXBO|TDFHKE>o=V_gb z_(z9|Pm?SX=lT|y*jZi7F4$)oV+9;X;-Wta@oPB-jwNY1ScChQ8SX7z9z4gjt7UWy z^sDeemLF~;y>(Khb(DbXFd8>MFY~0Kl5q=ZoSXbC>&k^wnA+UBV7Y(0-$wU(4VM1=sBTZTzV2Bm)q;Hj_Yb9@pt9N5wtu$k zRN8jPLt?jC7uSKyH=JznW5O8>Z#nOuR5I?X%G9rP49$7tUMq$?fpJlp;Qm@m{ZyMc zzp_f@7mL%|nl@U_4i6j`HJ7-v&PWH${VRv}~=0&J{ z)3X!PuuT6<~xYh+sn1E z4>i#^?hD61unHj4u&&_!LuO-N4VG2rsWno@o~=H0DgE@=^?8wn;g^2?*!zoP5HU*8^>m-!Y%! ziA7|$9a7~Ut4%5pv$QfiEP<~W3hqmdDcUA_YO z)9LTR5`AxNysv1MscCAuViv(+>38?`CE2Gl{e)5>hZ}@(o-M!yh_Vs&?L3ibV%Qh& zCE%UufQ_@N%J3-To(EZLz<@V4d=&N>Vk( zpPrDpe#3z7KryJl~gN ztXCnilxbJg0^XsbZP-h^l>NE*?{eCobfJrEE`uCnS2Eg}ce4|IIbV$lhcg_Fi<)fR zsEAauxi#q?8KTu{#}h#lTJj3G4bYcH%kT(pSw`D0u%NvOdjPpD9=y1UY6`sL87h@v9({NGPRTRhxD zWB#50HZ?K&_+GzBbpJF){g6TKEGI)Jk+F)+Y~-s?#tF}1TG2dD(T^_|$7Okwr>+bp zTE1mCInN+>NeP>u8sj|ktavOUif3w|YCr)l-QsCw`o+R2>F(`|HX7zmGT0lCme1LD zs~B?;wSoKf)cFj<>i8_1OKgF48*{lQzq z>t~yN7ElW^x_oFDuAkl*`?3*E=$Vr7?cY4%MOFnDTtBT^`SQW~Jci>7UW=6j_5Tc+ zlM!3bAn|hTZZl_wJzY@YYvfZ-xZS_x0o_`)%(I zVfHvN8~x5V^NbYGj1NTCV;l}q&{FTL<&127OXg%sD{>PXk}}c(dTay3=aSH<^p^zd zBE3{`neupQ|IE*f=B2jXIQxo54nfDKPjkT@f_U%Ry$(l58h>53triArs{S%|JWL3fgKv!cuI_28obGYSS~&qmV6nL=cR`bH%js*aymP% zj&nulUlZ9ku8QQzM|rIx2u~(nzJf1D!?v)w7}!y`eDa-JG?F*)$m)QgosG^od6rTP zLi}|$q2GTTV`uZ?+y8)$-oSsWwMqzpZKzpiZ6HIlFHv6gg$!{-*k-s)Sjd+}wfkNh z`0Tq>T>3eN*C(}kqPl*`t#pxW`$aVtT}*w{Ymc07(y(c7BHjj&tJwKLHW>S8~EQSkt2{X@fyo?e+d)h=)U5bQlRJCN7ESjWZI}6u^vRv@&xTu zX0rrAWmQ#tJ8M2uUU`QrcX8C48y{79>Gj-zQ&#ByfYbgTSlY{kc8?k_=>~C43D7O= zqMhj4jL>g1*Krr=tRl(9#pCSrK1rJ_5WUOHKm?z4t`^#jX8cR@{qD<@gHg1Up$k&+ zQ_%li4d0#lLV*~wBKpxLF|VATA@eH``0!S?V)ZD^&k+4YbNzHVQ_a)udmeLvIF4DM zk&IX5E25gD8{bK8*k#P_T$<U}W4Xcl+ukSn5 zVM%=*?`b6~y|TF&?^&|%4_tP-aB1mE&UuL2V!pkxhV*4-eT=?Rxp#;ul46@`Ls-|9 z=X?bUhtcd#{YAkQCd|eFnjHPqjo;fg5B<*i_mHDyPRVr(3kFkd4TZl zD0a^8q06F<1)u0Zlb?U~ADv@xBAMTSU8kf^{|b1fm}SKj$e-?SciJ~IW2q=7V`(9l z&lJxb@;;iRjJ9%{ya5tbbhtA)(}a*W!Xyy>a(1enW=3@uTX$<7(A`XaHxKSP1_v_5 zJ@QG^r2&-0s;Vs`H*~qd7VdLOIB^f6>1{&$1tuW|WKDWNG+*fOSQR7z1ia)=A@Sb6+L^#2=^kXTkk*1J(~$W%#s>5 zT(pgeq!4?Y93IX+hMh7264?C{TkzH4?>nWr9a8Ip^YDZB=ttgUR1J2=w$J1HtGbypJ1KKuytK!?Y04R+iQ8vc2R>8%b51+KBM;(2>*J5 zqEu;B>W_gxIEs@{`RvE?$R@|YfQI_ zJ>ehB{GVzs^G;~-F+BN(oF8nCt$jb(!?$`}G}ZIOOKuDOhwf4LMr-?969R>2M5cxD zOG0fyzTKCwIvX0(i>UasKlg1qs;0ho%(y=kV7c z_r47GpX&a}CNT-0F{~Jc^wmE6V!!>T^XTL@Fm*c`(tm;4HofIcb=5Q8n6bY3ZQ;Q@ zqI0$5+Z^US-*WrERdsBn*F_&Ge!;vRJ<0y~Tzw)fE;0A_I=s&p*dP%*T#_zp+^ z4k%Cu+C%X~))qp801yDh-2-5^FW{)tB_P=+l&A~1Mc1a50uk!;59Cp5I1vO35EJpC zr-FdMKy*~B+TWQv)L{@xOmysjCQ($z01i`T$e;g6je$WY5_P%HDAHK~Zr;yMfA+sC z!v;@+!+OGhD)>4BYN*JNk$q@k<-^4C6*OiXfrn}&Cz#II)7EYQb(YK=7OAYsf+0#u zAr@dFqyUhL5Ns&PcT~f#zMu+F(r+PH&~Z}`5FKI!w8vtS12FDk@E0s90P&{^2Z@dg z1>kvRWU^Zf0McU?a)f(>35e0Nf%bkrfB-Ql1?a9AMSce)$WkIYrs{|R;A;*}PS`Cj zAja_(a8q?e0|7Z?aey(@AAmD=;UcYZHz@&_jp60%FUvZZ=cou^$R||V$c>4#&#oN} z917W{LB&= zWD@=LFZaPO<$fu3VY~NbsJE&cF~#;*vYY7}4jpnyoccc$?T z*RsLjQ?Qul8><5GsQo&-^uvj=XS0gWHYt5?l2`Kd?waTx7mwdZmrA~xJL9IDe|mBD z+1hdA8IM&g_We!CJ!zVM^MiLRS0?MA0b_14`@aZOTZ+eFa#`H8P1U=5WG4|<=&rPjR-O2jIHk!@3pX#?t#rzltsh%UX zmXr7muKu1594h+iVtDAu3G@Z2Y&xN2+wV-Tflo8fy~1tHJ|4@Jz?ekWty~6|dAQeK zJ~7G2^52X6A^8`$56WAv!X6}l^PhUuywRY>e=(S9t)4mjNnw&$d>VG1?*aDKFNY5C z^?D4WJcR{lcd-*1kG%EBin*dTMjS5YmVs=EW?^;i^;~klyFHMBN$=&R^A5hrq5$^4 zuPi7VP2f?s9gNmrO~3naZ$0*rj$`EN*1uJCJ)qyDTQ#^6Z0smR^6zIvSs(XD8R!9=zh6pSRY%4}bU` zMEK^pAEa>}ys_EsM?xzgGTJ8c2{u`@*X8+bX==nU4(G*6k-;7=WWt*7mV~_&CnG2C!Vs7)b`4cXKbU^RL#rqh+JD;(+OfVh2tvfgR z=wc_6cR<@uupsl{C;eT~SKl%N15YCWf9J!Wek<3*7@dZ9xPMa{{V8{H(_)4YWb`Jt 
z|0IOkra5;5V+^hvs*>)1nB2LUkquas|CRbc64)nzTKD|4cy>Ax4QDqQa=V_WzXo#f*3yCr?^+vqVwTWwYD(+G2@~A|d(F>t`bc zp=<%O?Z8M|NCx8?UmCKoLtpXULNL@i`G&rItLG$I7zO=Cs-_Ly6JZj+L0?(LNd|i| zMP92V(Dv+pZ&ihGAljlOjFUqP>&e!i$7xG!gYzZ-9)4rOM>OE|pZ#Rm%5nv! zq^XRMeVLmd`?HhW*U(&&*yx}5Wf_hT*=!O+U?j?jt;gA)x5%UpyDx;E8QWX`4Mw9i zK0g*xrU#Q=%G4KQUm%$JK{a8j=8XT?tMigjW{O|s!8JF6w_?de9z<(k*KZ=tdncuW z%33)Wq4U6G`y>_&l$!8hA$f@yPKqAzdQ2iSyMJNmLf&2ku+zi3w2%Av%eLQDy~kBc zhFc`t*_tGuMy{1z=8a5f+`G&1J$6Elj-jCo{!&c}lpi)3A+VM&S-kk5MRZ$2PgYQZ zuc>`+xu>d(#Xujfs@`L_rF?w7R)GF~3LcO?>vgCCsnV+xS`kRUCT4@Lac}i4o>UQb z+G8QtnNbx05c(D0)G$7}P0;V^zuDTm42cvhnOKDVnlLqVW%SxBtKfoP*Z41A8p!ndFb&XZp@6;O*xRL&beTD zO=(%S&hxIyiC9vTH+SeZTHy0z=a6aedm)j}|`1FS+pB_i}UDpp(zvx2g ziK;>GZT?j2=xo8vELZcZ+KFDl*@hDm#z2vg{U8xi0z@Y_8Rq)d%mrg3ujEB9{XNo1 zYn&mVXO_rJpS=n>n<_C~>}^KT8#xN}*t8Q9Q&QO_OoC&0x)l$~)inpB#c5j3BLrq1 z)6sQ`iZD{Kk#hf*N^9#}N|ES5d|be3>- zjq-j~J)YX%3~ow_cqbZK)E?}CSVNbp*gVFuN2@wE7q6uYKmOfVEA}uGtF=og1oFXq zM}gf*e_4G`$pT+fXV8}-$dP*qB1GXYO5a_=PQZ&V@4I{xvU;BM zc?e7p@Lqe!X%8A=9A0NK__;OF&yzKfGOGS4_i?FP1z^|vl;wAEg_rSwj*Z-E6^-#1 zBFrh$B9RrR$$9%*_w1T)!V-(eq{DK4y|+hHB)!9HmfpE2`7rx|U$v-*QWO^{HkKa4 zreHR%`JF^AC7eUr1ke$=7#ox+ltUc*vtLh*id@t`dX>L*QJ1-GULDn=e*aef=Ru!8 z?P~#356w4#Erhg}LF(SjYkC1%(>lzsMEwK9zC&hxyKAio#vrp@^eC=g%+>2og&unv zpkTI_zbf+<)sO+V-g9!h2Ploc%AFaFQFB}dU*V%(I7kNPa8@(@;e zNKAi{Pt|NLbZ>2Ln_|iYx08GFYxqXSWx#dG1LdRL625=wquPHD*#rtMZM)r!_JYnL z*RV{j`4N17k)N;~9H)cEw+QvzKGt+n{uZjmL3X33W05zDwFFalsu-`Rab#)&$(ubQ z+pZ4=SV%E(dv=*LhoJPtKM*V0x~wgKhG92)dBAZ)G+S$qJ_wfEId4=Tr?$w=W#J3*$D1N5_*?hsVc@ii;*!qENTsNF=d1 znArs)-;YK8o!KKe5`jh6!R!)rsdi&#g!eKTIW8V{D+DE~Yfua{{S|-%Ui`57l*x2F zgmb+v;DxFb_8FtBUW9b&{Q`5sz!0%%T>GfS%9*4f=q1;`B}L#7+R{fyg~nr(IX&me zP0j3`TF!`2#pC`HJcqm8Nfr(6{P$%_LV2#$WPA_eepjH{NarpDIj{q zSC`8sPHcMTIsBXscnzNGRoUShR>&rUGNv;LgULcS;j9!L#vUwAbB z3JJ&@GiCVc^TjXJY8;EI42!=B;m4{c0S;9sY~+4rqD>Y=Dc>!G zgsWYZxgK%<9_BER6$Pt2qLEe$?q~&%`Hy>jptb2(!6qV=ylj9~yeJ9q{d2>0=BdIP zp9pD)o3jgYgK&2jeCkI$(@}k5gqWuBy(KoyKgXA@4Aaz4g#AEXd_|L5@S@K~89eWU z+!>)i0@J!r=EeV^?meTT%DOF3a?XNeD6&YZTqMaTS(0SQMb1^^42lE^0+J;upeUdu z36hft5(EK}EQlyckPPyU-S*e??K|!m{k7wb_xMqxD5{FN*IsMxb@pC!oes7FOn2f`fa}oh~a$VV6E{XbQsmwdDBD3AlEk+E@)k~zC-`peh3@-E0Psooq+Od6N z=n;_9*z9lkhH&Nbs%Kzp*g4SuhPIwwTlCY25K7M(1QX5uEoQk{ITPcCTJj`XB zxNa4d4Ij~Ph44>4ZyKYNyR!ieC9buklFs7&)sxR9uQ#Y*g74eIXMQvF19YokgW)yx zu=mk!5WWkHMAy}uCTq9%zP!`S>+o1U&rZNxTJj>+#)v zE}B9|W?sM4``>j#n(8X99WIKhgh~ZddgS94rJM0_LT_$XzU?#-!F2<$;(at@n#vRs zkL_=<#)sWbAYhVx=Bs>}i=Jk;sM+dSw(zCU7#svmVWz_7W&WDP?jCCS$pUbUWE={SW{$Jcu zTW=Q%;4Zj|#+BacLy#ui6Z0LA7~=G!!0RNqIzaY1inb;i|AP_!R+3 zzp>|1!nHje5+ELAL}c#Gz8R2tN^uAq?`yi^ z@unOmboG0~4!%2aGs%uDsAJ|s#EE9MYKr|1a~wx~lFMD2{tmxkJd4v;hA+rldF4}|)!jqH$tbXm5;Ic#J*+vB^WWr9o2#ABQk!mqAXARHdA=dHG?v~q-c#=^T`V^C!h zugu&k?v=jx+eiw<3JQ;Q&YL`XY{J7M8kt$Rx%CA)y5IH&^W`tyAbQQ}oI8rlD(MkS zxQrFor&`P@#@k2H)8+dd2Xuf-%*gNS!#PQydr!_2rBqH>5075F|LMM$&S`x{;9aX~ z5<92tyYsg9pVJ|-2gj3JUGZFuOfsjlvpqb!S4O>LE|5H9(qLQ{1-~ir>CK<(0=7)lgTUCxTVIh-7Ch{ zj{+)Z8zl(i9$+7M8n`XKkU~e(#?J3~`$YB;T1Pe1P(8`+{D8DE@icm95VK`=>jq?` z4!1;dZX_dSkTU%FLzMfNDE;{R#R#GCu*@l6kxvRD)5LttFDi-U=L5L!#2yvc!q`e4 zrFu3F`f;GuvaJp98b0Yc=L_AS2YuFNWj9PKZaA+j9eDlgu%k6W#x^3n|DB+lkzk4cN~FB^#vAr|Kjs>3=dcpsnC5J~9G*}X~-Ow-L_yW-^r z4R0=SvNwR@YB8Y9UVSR-o{Flci~W*Q<*v=+Gk+-wqepsMFc9y_QE;5hEroDV@?R+w@x&qoL-) z2I-RQ`>$e_DRuMpiEqWsKjvmx4!Y6D$gmw{B)nzUfw{+Drkbl zwA{U1tz2Yee*6P>YbP=Y^v|!=_xC`9#N^%F+_Au?ysfdn{jTBY=1eAb83YHuz}?Fh zAOce`@EP!RGBI7W4HjgIgo(nCU@#a0LcxH4FY)6U1Pl`uM?k?)5DE_b`NwO(`<2ib zEJ(~j6zc5YhVXGldfFn+Ac6gBB;vro|A_^3zmjpg8)wA-uhvJRNQPtj<7@_=Qjq z;_yGLf&gp*MM_8@K?sSTraFTK;bsrx$J2!tpS4nZL$KnU31Nf+&c 
z_47hP?41G}&p`S2q43>XS zx_@GUqy9s>P?RVHh-h&X2rlt=(#7~VS^0W9`q%_mqt0OYm81**5j?-SE))VRMi3YR zfP(rv=i2&-yV^KALJ;=Ivr+W#IrofNV1IBf9B^kOU=}zC4*xslqTn79?jH6IP)8rH zGgkTclzRpWSPS)-L4rfv*=)b4jGf+@}P%aDt#4xZb!ay+bzca3z zojA%BiU8aCqHO-O3eX_&-!kqQD2P887Y^i55)xn}5(Gp3FNBM>g?k_|UJ`a*=rd6M zJ>i~#0{eq-;b5ew1dvmrKrr~<`4;15Ct+*v=-`RPp2=EZ|DJFEW^>hJ5~8-Vok_ku|v&x8<=5dS*Ag~I=! zTR3nP0t3TPC=e9+ce?e%AmCUVUpHS2`mD43O1gytw)ma-VK4-cRs!<_h5emvePB>@ zfSaug8UqVBGe5tQZJ|Ki{6qo|7ef`Oorzq7566WGbe&t2TX&*#h<1mOO4^8+MH z|A7J(MTtX@;vfj%zyH%!Jnfx5fehEz#n#H$OG_Y9gpu9X2afX9kUpb$_f@bnLRok8P=@$z@~wU_X;f_R^?%^A}D zzcvuzPy~<~gD_Yxw3REFU(VWf^+ew1l+A+`pKRzXYq*s^<1!K1?au2_B{05x=i$N= z?#i2hzqowsb9w3dy^FOEOG7lq8j)ik%2x`NbU8T-FHHC!EHk{c$n@Bo9;7}E@QB&V zOE|qtZDn|R&}Z1#LEZ9wfAjG1)1$Am)I0tctMOZ(9_<|6;^b%!*^Oc{i=GLbIPEO5 zEqDKRWRZm82oZ1oy3*0{Wv{y9)57<`^$u!kZ~`4!Bb{}BqahX7OHPtT>K)gLr0eB* zL+c&0J{Eqb-*)?^Z-q+ZNOa%e>m#S4p1IY5??Za-Wy^mkOl_7nL9;MJy?vztx(GknOp$xfI3AYOd!g@24MB28Aa) z_$J@k6Qj%>7d;XwATGTric=!DB$2p?o(sld*6itnavyv^adf=Izy4ZDxsda1{{hp; zz%}HmS^44aYKncVp%2aO;Nps);Yk7;VWP~3Rt-5WBi-=u%z1G20*ENjKdWWAL-;;9 zX+r6TM--AvJkh$tU-v$I$F0B6(A&DrNQ+6^V6*Vn-kAf_^wLnmb0>oc?o2E($BZuW zaxf*ar*+aN-sk5ij?_;EZ|{RBa|8wH?)9h4P*WEM@ui(hyF^)0n3^HjU(cERqMviA z%WD5J{aw+oi>y2;_(dS41Zt0k;xymu{?oR3YyW6^t%&fLtcMPvn1Ur_l zC%#(X(xr=k2?~%leN9gD03wG*Kfi)yuMv0=H_4MoVj5l0e{HK?=aa8Qvg$UsctSYA zGgHMehSgyu!QMIY{C$dald(@BoKCeZMUmvV^|2IL;8~U%F<)ick5|pFziCQ-G^>2uM8AMs z{a(@eYevbmTp!aJ^b!0cccj?AE$yIAk9X6>n*C1pr`=Bvc1x&dPxqcrbB636ar#<> z)CUF}*_CT;!U>c@L-cAS?nX$+l#Eql3Y+N40z14oY9zUr=n{$z;>&l4FIv~?)husQ z(Kv@Nt4Zx|Bd@Z8>osVHlRm6cw(P9xhfb=YcZwiRxF``ST|z{%SW|_>1(9NpYYDK)>V8oh9Vl1RUOJ(jxB|I8fKKTRN-#wo1>s zJ5_>AhF<n~YJXZb5 z{iSaB=6KI&blA1{oX`(NVf4zL4bc2HedKCjif-CV2->}sjjcU&R~w^AMEah;Pln9# z>xD7hafhY8ODt{c9(JO&6=AS}Lu@=DZ9{@mZ#~ zpD{n2Ytt!khz6Uy&mTn1XRYF0#*y?-8E&CiO-^ikDq1sI*nl@fd4cD$_}q)`jTf|J zh8OzwZR3+ZI%9dcbbA36JNki=3LE9{4L9deXkCx|xFG4v&HkIN?^%YGAw6|`E(V^D zIady!=NwrS74NxT2-9|Q>$G&i5^68K=w)0IN_`Ym3C?${VkaVKC*KaJF!b)?=Mibq z>H*iT1JoH86Kn#i=YWHkjJ zE>f(Q2+^koSJuhuF7V=9KB>!ddg3Y{ot})j0qU2hijw;hAK~cxI*uCXMWiVo*7=EY zL|@UQhDnD(9+p#^_KAg(o@j%lszBOcWZQ1kMK$*EIHr4SBV= z>_me?u}eX=@;Q1(t2@h46$;mhuRS;VW`MuQq=k$7v-pZ( zoxy?;L3LqN#p#m@&*hlTwcWInd`4Hofw0%i#E;Ahw2o7}ld3Uz2E>mPI37&R>>P#8 z7R4}-4uOYR-P~)N9vo`9dk~>EQvH0mN!6bj_@4N;sK2>T<#wgH<~X?^K37knc6Vsg z#rJ-eq6#Aa`9o8lRD+r_iHNN2g;h#pzOvgZTV=xMoh}_3nzmRu=gGMHu`fb#St@UB zR0C}zCSJw9d#vv$hV!;h9NX$~Gg59`{ME5kY0*cIylv?g>dfjyCK7z8K%_g)2bN-u z+?Be|@iN>59?I1n9$E5x1v^zypP5hXV(*v4slI=koETTsJa6|XYj=!a%G0#6O?Voi z@?r{N^OQZS%=KF6qDcv z2QN!%0-UTEyomJr5)1@QF+-5X9K|W!1D za3FV-im}h98xf*fRpw#(JWZML`Bc=4y$*t~&#Hoxhnd|eP zj_mVW@PixWRSfUiFa>>8OvxnMj%XV)0#!JBi zZFh|8Oe7)^b-3qdr{O$uqImQ=6D`vRPA0Egx3DkB{3XI>rWTGt3)@4iM9cuD~le^(4 zen;T)leeZXsqQaDl&|YvdTV1bT%fMAB$QbnlqY3hotAfzv%>or5^ck4@j%4n*{%iFcae`8DkRD{Fe0*SWp;`ykK>W{YB5+ym7?m?mF{faa)Gtn6=w2O z8s*#-f8)Av?7tpRSs2Qkc+ortB8Y;1@M!#BKA7X~!-MB9~?JljkhBcrywP zf{>B9%FAd6a?QTC@2ZK|s~&3H7~=Sr6q=wF!cP8rGgP#iOO&QX3{4f{qk$N^KVHzU zP8iWg7s|hSk77Sqr%AZ$qszi-P~=wkqV7wbpbpcEJ1>cLA?d~q!Qms9ziYb|@P$l7 z^}b+6z|$^WkcRE9CQnF;-@U$J`*D4B%l31<1=&QJHxo+iGkbvxOO>I)-G-@egr#qo z=J_9FH07-gJbZk=D-Jao%eU#rrRQ~UuYLVOeVJof!#mwFV9s?eX7#a} zJnOC7LeFzGh;c<6?tdk5+2J2FW^5<4rQ~@MGo?hHNL}1O*``l(KeMx@%zlLW7h*iBZ z&Ow{AY`GXr_8L_rlQr176Exqu`~5>}Vre*=7RM%G+xe~U`iTzSibFORqS~FLFR|k* zvI(nYL-KB<>V&i8+`gGG$^1&PpQwedT0jD%pN?(YIH!ER+Gs1fE;TuApgxC_?v-J9 z)&<$Jtqg1KoVNtycX~Glr7IhD)l$1`nLKObGb(uq!n3-Q1d4DrCAM7}u`=oN`Rr0J zRjbjp78eU595~Q5CHaycDYSBgVx~-Ny6VoAXJU4e z>fwI;=r!Vm_2RgG@55Q`gF@QpYD7!~(Ky1+s03+qiQdc4%!;& 
zK-nMn1^91CzG3?O{z_?4qLT_^0h#%C#?mAgTM3J3VV~DXZ%)#YqQ0K+oD!2bO#_`^ z@s~Lxg%rDN6TD(Z^K6-x}CtC_ij%s<2R@TGcHXv^)-smCQ<9RDP6<6-BXWzmkl_6Ga`ow(M_rjjtC^0c@dD~fj~c{0Zag9y{w}%@ zr$nq=$u%E~EQ-rjUk{&U2o`6wS9@k(e4^QMo*||{u~>*@NVu>1N}yp*t_2bCs^v!P z{j@lxXK@avckU|}YD?NS6g~*O;}FXZy{=>V=uj=JzNfVOLd=M&nAC}d>@oG}{Wk8u zS7yL}<>nX+I4*<0fg%AA4A}FA!l1zUJPZo|{WW0IMIQ9$VX6(*(cKNG)BuTH(^Vz= z@c_EufW>-XB*nz+9kCAH)}l7Mgplw8~TpaD)L_DqS zu!1tc&mZ+5@L#&U_G3>C7|f5|dSGiw0xpjHv7?3h`Qm>!91XM;1}pE4b#V7`#QOg> zIuT%KB95*eE@)S@8`kQFS^hF8#F>4%|7$M_i3CIbx)-IH$kXfDv{x}Vd>&lSd}p@R zI(&VF`Cd>GB5WX{T4-WO)Ww+Sc>AQ%=$ddPQ~!nYz5Iaz=Z(sY^zLTf`TCU`3XMRX zKMmqO77Cm>UK%es4S@<`Tc}RsI34v)5B6r=Yfrx>wp=>d419j@{#5$y@}qr9+Q!$i zAt8rLE7bc6Cx}!_g_DQ#+Ihz-zS*->G933jhfUO3wc3Vyfi3(nXR-aWHXGfVSy0Gb~{F(0|R= z;7NN)$F&=&qc;22np9pD{qhJ0a`O{Pp(-+~QCnIo~56T0!LA?nmda(y)(&kvvtQ>Lr zF8e+4KKk@2yF+&h$mv%P2EvZJNve>kncX}IvjOc~(ZgPklDYz@Vy~4HsGgt}&vkR- z(}+)N2d6K7b5P*n{Np{=1iQy-sM!$z!h2s&Z8Ic`J+s|gBCg5E(|MhAxi_D9Z{M%U z44Di%8Wf1~U?|l0EeVvXcu)NFN!z5p8B^X+`dW&^rm+ik{MA=|yQEBamw;OSOqOKFYBQ4@a8b zTS|ze4w0O|dx$&!2-)ut%F}v2%{82D=RFQGK4bPNC0pY=bO#?H=auG4D|Ki43I9Rl z21i}^eBqwp&FRR&4uz`&F)+W37rw9S4+c+E2HsO-vR>IL2yJbCpd>X$s*1Q7c0D#} z>?1{cnq(M6!fPu2gSsp(D`{$$wrhs53T~{OLJhQ>ubwV$LGOJHHh^O@NMTu8<6@oQ zNf&UBfkHC*Yudf%Ulpk`I+FGIYDl1%KI(^g+F;xUb`^CBGpzN8O4_s*&cp5RS^A6k zm54+YFm}8gI^wH7u={%cwih{*5}y_ag$74{I6q5kiyDg@ljMuf^kL-ueESlv$nD6| zHgKEN%^?cs7^HGkA=|7U2Zu+y&`MAxfo`b;4w<3lX_k}dCmEyGH_SN`{Mntgl>`*- zsRTtbMI|poP(E63m~ zbdA5mk3)`cO__?kN`qbL%Hh)&gr*4RP^!#RcD&)0j4&g-Hp{EgDO~C6+CDKD>C?$M zS$}?EJ+6hCzI?!FQsTx~Gu3W?+pP(mBHueE>f!!yrG)pD0x8Dx%E{a3P+Mss@lSFb zHtFYmugG%Ui$>ZOGt@+hM<0_>Z*iK5u8>G^UR>orf=Z9zv{IvtBMp|f%jO?8-Ew{cgGem z^-z7{LK9kDW~|?rgYkcm^EZ&#w@B>p+e`kSLsc& z9%cJT4C=HFM$F%QqiSZY^;DB5vAx%e;NAN3(TxvzT0bAI&iYq{WI*`IJFeKh1=-BG(42ho)pRn=kYC2vs= zm0hvBOc5c~U&hYJqZ{(ZTf0HLOSN-=TqFD<{^0GZokxo?Q|wBLOU6pn2e@>u6+$Gu zsUe1Ll5h8s$Fa4Pb(_V5p{Ir4@LtR_(bL+#Bc;=MX&rVUs*&YRRqcJ9Dlr#>jE`Lj zQEagz{sm^1T4e&tmY&}_a9FP|cHF5Azi!GOu{|Zb$z|IrKv<+)w^R9?rIJp?m(Za1 z0I3*1FPF!@?})1Rq|PVOz1>6gj@<|IIXYgl>n=`&RtAPfz^_3x%0}Q|$h7zM1srR- znSA)N$$-SVXV|yymn^rEt*+ekY2WP#dx8HxRM|VPP&Fa*JxA)pvAE5~v2J@kOandS ziImRXTP@g(zGdwcxz!dTTm3%Q`8?~}pL^v?1iDB*NFW8ss`u-vP3+~Ilp8uHQ?6XN z%fd+FY#m2!X~1qSVosZ6oZIVJ^TGaU0xqycyt_*h8GDQG^_+=ML6(#wqOxfXDtPJn z^lj?Ez&A6Ml#f?k8>j=N)J#N&E~J>{>AVSgnI`bk=Qha|)4Q#AR$p|Wd^|T=A9ZIF z6yIIr2Z=EW)Z!-=E~Tv&kLldEB=hSLEcOs6Zv1HP^++}}u{OI5hU;W>=Tps?j~=eC z6IFM)H@G*KqK%{0erVaVqdZ{O$_q~*&pszl!fU>N^7B5_w!k%-lbJ_Cl>fuEo1-(- zm%W~SB=x^p>Q8ce(X~|IeC$$?<%1C>$G~Ts!!~pqlsu|oHm=~K*I?a1Ck_)t@q_43 zrHNpLC=I{I;46u(GA;$3LB>_SbmrG8uWEw5=;@yoOUX~LHKtZi`4ssIHW$r=kMcru z2U7Ou6Hj-RXG4y^-db)s-VQxI-gB2dos~tMZuNy8fB9IFhZL*GTaL;xKlC_m2`(>{ zzT55(B^Mk%3aR9|haafWc42^=b77Q+0iR^dZihO|lf}~G0Ea0NA8cR}O-$K7k<5^u zKI2N$EkrQK4aw+?jl9XF-qU3ijewuCA$pub@ub(%QTbxmiQi$4Vi{6lqjSVJCmyZa zxh_NT)lqSE*6`!J6r^)w9lqEx(n5UobX#)QzbCgD+j1HCa@pi_-@#1!Eo8#iiyEd< zR{QcVype5{c;+GbH%2aSJ?YaqewkVKsPX&Z!wd5r*Fy!?4;7b%TAj2-v);ejJD2KV z8IEVOX|zI2g)=SL^>(6%M9mhuSXo#*OFSh`iSk+@Q%HM!-D5hrzIc>FzjA8&LpAYL z^D7D?ZOUWcnM9f7aBoAz6ap4)j!NvWtUqg)n`f7HsQ6eU#_#0ZTjK~B_QbPQ3r_aw z=d$pOuWynMBpjJVI`PgBTj&^lA?kYANi3;o2J%d>(Yb-~-SaXdd{)$ywbV6~%J(3j zE&itOia{&C)&sA$IcX?s(KEyYK0?Ck2&>_tcwqP#4=-3JGe9rNK)p@76!L6c8EzfScp6Tki*Y{i|u7CK&FFKfLya^Ik7mdTsskpA4WYOsiA z^O9%za-h!}>)u1LCjCy6r~W4Uo)bb>H`x_aGc0o4sSqDizF1j1Pc}tWfLPrhkP4q( z)HF3qWGogD)j!s689HFyeVl-2l&@NI*-U$NDjy0X;31Hq|Hea}T+H|Yb%?ywoXaP- zTNOc|I*}1seg#UZ3*E`qk`D)cEwAH@vAg%~sSc0$R)9*@ zbzLp)u1PzXAkvNE9YplFl+W_sB|t0`qVS5k;SPO`7Ld^vi9+?D%82 z8)F(&=<|q{YY}l}_5?Dc*BwgJnl;%&2Ak^yn~g-};vDmlj9knSTaVCO&3YKzrw(dV 
zEOn!I_&<{xqr3EishgENs~#;G*r z@}=yZHE)gS^1VM$4SAJxl}6+1`8Q9$;7C@3UUxI>8-4XiO;OsBUhxQy*GS^>wN88M z$*rg$93kp!dbcm~xlq>ob?01!E4qck)M@WxR^Eld8GCc<0fnt-X_$4!%&6f0?x+@T z&V{TVHJD1ddF&v|-kCVuH{P(Hwv-F zLrCUbsm2|ccrQP|o|pvTjXg+6+Z(y{b;>1m3X-6gqf+bTmPKnr9{*4+sD3%OMEx++ z@yMeK?{#7C@*uUW3Noi2=7ST|%c3A7m3q)AAmN-fMK zy`!VQG+LhCyvU|+qs?Xow_Qpz9O>-hrOtoikock*QBK$*7z?s^+|6eu=*Ka4V{d5r zypa&^z}`KRCuZusJIjSeeZFJMNUSTGVTA$p1rs=l8|}HwJzL$o>J`TK_2#kM@#q%Z z1AS`m{xGTd`B1ryfLv2vZt9n<3v|qLZyzSLroHzf$Pr#QdK9%)(7g&T1E)F9$635H z3ClTek8;qvX}z;=qx~sm>G0caGF-_Z-6Ioy*I7{l5cxu;tK$3D7hKkOZ5Z@YPHOw^ z#)kxI&uT4VqcV<7t$knd?1hYXmbuy};(}#jaS1f;R~w73inZhF3|e+3Xlqad{0;t2_1Y zd6cCdni8ifUTXccj*;cgdV>|(?P`t>Tp$v$hCvO!~w6bViul%0F_A-;4~)DPUJKFdH*-j(mI$k0<|=tnditAQz!^4snI*sYYzT zEqhKRvP3&^FNR@fyMm$BSI{y))qpt2&2t_4+$T$zC&)pQs;g;gm(|_$F8%Q+yR$X% zv1`jRf6zLLUm;3uSP7A4QS+XP%1f}_TjxtaF)nWzOMUp;ac-=fjIVsZsoK<#M(nxMGty95iPdfFy;*hi z2hZ1}rx~da701+$u>0_J*LEq9J*k5hJ$)LJP{lt6Qs`cq+_{Zk2|ltC%52ubIwyFuVZeXeiL2PJHfn|?)#6if6_tb~BF z-zkM))BW%5JM`%cMdvujDHHuEdFzr`Wm?eHV}yDzifa=-2U#-7@| zh*<2+9JkZmVkw`ZVQP+|Hsx8(AVsD9K1aR;u4E-Me{(gT19{^HDxIQe@>GmqAF%4ICo36%}F5I|?5B5K|@sZ0NEePuk zCVXK)ODg;Pq}}&T)!wQp=(((ld5irJb8x+rsNBPqmWB4v>FxSo9ds1T@r2W_>l7H=MKo3qv zgs;}K66@*~ou`HOtQ<7$N&y?;B2d)f)H^)N|#y9%hoSvK96c*L0V*a>#PVTA^ovGe?X-|Bm{3irzHz}Ux z6_N~pKDvx#BEN=>fBAay=<)yy9zkQbazE=7>J7n_H>F3Hed*W^UM^h{N=JQk z{Y6N{H?@c7mH5SRVkz3QiFGdd+;sxV#M)TDe%{|oaO!h@^>RG&E=1Z?puGNGNX>qx zO?^^+YEuNM zkSOsj^+CmXy7lyd=?%KP#>s}> zfG#ID?Up%nQMsJJ#HN@!7eg~aA?dolD+J|^o`xvmXu|TfQZ=_pmWPk<5SrX$pB4R~ zi>)zx`eCasWw+d!zc}ZFtSRSo>5Pm#oN5_vdY%{&f*&>TXw;&@K(8*Qpm$&6iHFVj zZY>wNzX}1{UoPaJ5>YPTMZ_$OcSG{=LLdpUD|yJ)ut4YcDQ_GzS{1lTd5r| zeRJ-q>X4tT?8%RPR9nid&laoI0zSd86Wd+i#?Z!1wHWl1qts--d_wH26IsWDw z(#Rz5NM7-BN3S52NK5XQL@N@h?ER>soRXpkQ7Er1nJ8n9vd8n^*;ZF`LE{F@X;Z?| z^Z9x%kIaiHz9y8vH^pD3FPt_D4RtZS4jK%}t%h1?R;1WmQxNR;tg6czd(fz&!aJT@ z&B~(rp#qaRtCsC)OwJR>pnu-@x;WV(E@*~0k?Opy(qf_75RXhc>VXrnSM<$0d_)^A+=_!+oA zuWL`(!fqcm5zF#w!JH4gR&uGg%^8I^7?4QLBocX(wNsr{qQw$Z%#3#4`&3&hu1wXQzqh^SxWXXH}t zq18WHjgp(L)}q8lA%gEvq(t)*U0YLs9VEH*i0!(@l3SP|Q!ysyV-fKJD~uutYezam zSl4*D`szS`;s@|oEE}IB-e@hoi<3_NVC2naEY*A1wtvC|xlTvNAgtK6Olf zG(!1kvRfmapdlb}4o+zE$(p0K{!8Qmb{|sAUU2wjv`wFi!fgv=ru|NL-OH!-ZG1e( z))@1Hk1L)eJ2ED&m{&o(PHywm+%kDkODXHqseN^2OJU1yc#nBdQY3mSY6MZ37 zw|LCM;xt;%`h{Baqopf?iGuutRoSWDcKb&#B?EMY5=n}6w=v2lFWx?))KSCVmW(?a zlX1-pgRz1tG&?6g$WY(W)lh&&SwN1c2_b;<`pLP&cd0Y`}W zBWIi%iZJ$%GZ}48h*hrG_c(hi;kkr)o`^s%Ud6fO^9JII&(s_2ZnygFht&3O)!1KF z9y3{>S$Lon@GQZcD#5B&u-eCkq60Uq(}N&~qg*Eyy`s3Frd0TZL4z^2EkaRU*hERD z5dEau;(h#+D>z)5FA?$W?jJU5soU%=oJUa|oA%h7T&3EB0z(ytyV`Q^m`ilct<=Wcx%k;feLsOa0vNN(YvB;9lc%ubKr=<%d!MclJ&^BIZtO&-z$_s5P;sYE++FD`Vy)eI>;@P=-iJ-9#XfL$4 zs|ZvCEMjM62bVzG*;rYl(SNnVuavJK#Q~AvZ^y1b6rJJ-7z8*71xoP$@ww~&`)2r+ z(itS6#QWV0fCT`lIutApLIQ`lzrFSkT-n#r*-_L1XdmF^g0{6sd;R9sw(h=eF78&g zVpdqJm5sxXqgsp@0#N1y5>cd>j>2VqJ}6k*3yt~X3>Wwna0lQK@CV>gSy>)<TuI{#uc8)f`9sMc+T}m!NfS~~J0WlKLnSkHQ!|{?2^snyo zD|J3d$j@*Gbe%B3Za}{QKt2u#EdL<|{QtpjzfzTikoZY7fW{FnDh>topCBZlPx_~J zJ5%kT=_=^b2;e|KzYCGyOCH<+x9;i#X>#6O55TX!3bC{WxW>W0Spx_dc`*`j^WF77~FVZ<=5R$f>U8+U+Rt!#iu z0^auXT|6G%)-H}VB3@{FM-0%p=RXuH^7|j0(f%SozT*e$I@$uf`-6!wB37>M-fqA* zx_JNi&|i)7E2}~fh(AhPfZ!AbWUhe36#)pb{wc})f5kY+Gu0*kWis)n(iH*y$+o|# zCy`))?Ep3Z&&BB)~uK`g0H%e>WRo(QtQlwBa$Za`8rsD!RLR06r`4W~=9G z+H z{`Y1G5F`lxV@m$vnskO+|47Wl0M-FW7UKjkfLO%gfNT=zBk<2Nw=?7XqhbyiFI40QTe@1gSpjLx@8xr@ zK>Zo;9OGhzaR5?W3lA@MEYJw)e-@nql>h6Z3J9Y=91$p=69-zc09PBp1;Rg_Otgm= zkdBKwTG{-blL2jYAYy<(`ft}5fY$#Pq6doqe)mKeu;3$pT-1PIKV1N9(|3c1hq%bhtEe47(W( ztAqtro$Tqu^-{NMhE>XGr2%&K_UDZ%8jIEGl<&xfkWoc_55oUuxV5{PZjt-_$T8aQ 
z7<(udOmb^$SvE9O{N%&)mP^4$fxi1dvE$uk$Ps>+_pRBGfWw0o>Tgk7f_7|AUtpga zxcx3Ym`|2Cc&AUtN_Zh>N_>DQ<+xY+JKxooK7+ zdFCb;`0z40(V`8G+L#y1eUm6VlI%STBG;*zzSgIwPz4zg8~bjx_3Bi1!{kUyzDq|| zZL3Wr>a#Qcw4&?HB=A5n?)_;-+7B~vFQ4vl=X{kCYnPu4rls07ztJDESIMeUs%E|^G^&V|?56a56 zb$$G9ha-HSpOZSTkGwfty6JG+f_j#Ddwyi^)?jETo|IqyY*l8ncM#s-!M7Wq7C$iz z+{;^pUm`7k!AfwITkFFOC}(u~hBRm113+Tbv42NIUw!4cwdVtV<*}Y|$CCWM2S-Ml z{H&y`X9@avKl46aQYsQnF9u3yPp2gkUVIg?6E1NHm+)psOadGx{^sjeC1;71%R%fj zJTi|_c-|De$}ElY5^HNkXbzJr&frJbX}={c6}+;iW6t4b=jJ%)^=Bb6LzUBCmp|iB zF9B?k~qsLTS%b9*)hKEMC9X=Fiyumrzs~ZG@(tk~FtCx>@; zukzrU;8?7^yXBeQdx79v`6X0t{#5w91w%J+uFn&WoY1@amPw_M8_VwlVb9`gsCa}Y z?n~%?Lr3pYJw1LW#EM}GDtzrc2-1M5v=P;De8LWCHn*{Itejl-DBj12rq~bM*}@S^ zvKN-9XWq1rziYxHQ}@_qS%<8twkR<(m~VzJh*OuBG_W{4rW;o^iFVDO_FAx0@RBd$m8oi?ndth z+KK09cn_W0C(_oC8m&@SzRkCf?smXYSrGj~kEyES{+rz2%5V}V#kIJUgkQ|I0lVX6 zIEJgcRe?MNyaX!u1z?XgnQ}-{E*Xd7y*};#dQQD}#g<#J#%a(0QNgugU;Un#Z$6vd zi~8@l?$>3IkVcK%AW`%(2on_>N|@2Qg{cqOpbNgAtigF@QZZWe`s#Wt)HeRjeP`aG zT<@(V8=;IHW-Lx`RoqSIoIENKGf$%DY$0RkPVw(_J-AsmWR~!}E1g9#wF>6Qn(bmT zkTvjb7*PwZpc@uaYFtFB$k+zPxw~&@f#xYX{mH8y$57)@MLnD}xS+7Ja$nFq3CY-M ztjY`B2=)$<-jh3}e3x@aKZJYaA*H2^<_1r=snLi1QZDwknd5Wq0ed~d@?ED`5wVsd%@HZk-x_J!N);cjme2Z*?#~Ppl{4D27){Ki+%i*uYCx-zZ6N5s7Lygf9SjUe{0g8-AlrE#7>&n}*NY`%-Vs5URlO1++) zr@B{7lq$>ieUOWc7XC1`J_mV%;KZQ7E8;9qczRbhr>*3j+KiVugdwiMF9co|TRM_u zSZ6!=EZ9q0YD^XQf$77uOn-7_7MUtki-NWt$m2p1)eZOg#UvkoxMqD*~zO_Sr{wAZ4}NorhOHWUDEGCofDS zUH;}@?~$wTbP8@I2hv?Q>^A@t`6B%aKgZ4)|yCh>}HJEnTa|OgzmEe>?A6LLeu5%~0tA1y#oMncGLlFz#Xs;V|-GzZqd zS8*BZl20Yt2^tYkJv*2Ooh3FE-ErC+@6hSOV!ZVt@ znS+^lSA5Jbj8PQfAP@Hmj7p0s5#1p$(%JaQ7PP8fyBzMZ!gTFsO8sY$D}2KYLd6BY=siLoc;ja9{kAv z=w!&i%X^ve+T%d$PnB?7Dk)yM9!^(VyjFJibG--C{Zc(*!z-`@qMFzqzlQtUKl|%a|tg$S1X_&*PjvRF{LYqwc*l!SpC#^1mdc9oBQK#GDX`!SZ(MQr% z)CCB-i;WTTL-VGdBCD+z{$r+{Mrdou$i*hMkl#4779&)>4+(Z%4DG&y5ct%!MS_cF z5gTn$`B_oicd7)RKv9${MO*%Mq1k|kZp|*-7LcH-;xdG__`{HHbd>tY9S$Jf{GbMHWCV&MLC`ZYGz zSTtTlRT+r{7L-qIVuMYA)k9&puPg_Rj2bO#JMrpP+Pjg7tb*{A8L4#S5A&)jU*+ge zfwXK$U3E++cS0lCr`DcBfhM{=Wdw8f_?G$pv8fxg&6;39~LAE*945pq?c=>5DsM(D4_)e~$9#vU)cg+4V-8mJn!F$pz5NnuiP+VPA8^ zsqa?h`3{il59&QRceL9TsnzvhI&s*U)*FC~oB7ih%Tbt%=BTP(b4e`-ld4mw-WJrL z%n`CWhnEbs^IufF|JD+KCw8v6kJ-EGfT&xe*t~ULWBaqRHW(!Q?ZyuSwuyaa!%D90Jl=XXL6Jpv{fhJMT|N|5VR{tzB6MM8a} zF#yN6WQgM;+NfwtB@75|uZ`bW>RkMtOZ7-{x~3wLawDp(r)tOC#KvI*|;9|BN>g=Id47drV`;=#eGZqW#WEm-H%;w z6WV-}KA$Ie>%DB8q4Nv)-1PPaAXy~~x`@W9O>Ned*$oEsL)YtJ^(%hfO2#-3GTzJ> zt2`NEP}MwE?)&WKr01V8uHbL{TUrnFCPt6VtO}3GVy@25bH7|fM@@3ht^9*I2D5V+ zDi74vR!&q{i?xY`hvg|IBFs`=DXTDX`dV#m`Xf1TPd+q-@eIvuGh zG2$)oFUChZo#Z4p{tcV>3Ny>7l3E?sb{iLFTsNJkU1)g@-%Chov6`3j34w33Mv|aGd)Z^@V)kmi9}xf`^v8R&*D_)hd2^o0!Z_74xi zD0RXjwQ7RjgpX!BAp35CGN=7<@Oordjw+n_sY!6fN;5|qYL7AI%MV3KWv4;cG_TsC zwxtEl?P0Zo0Ts(DsaVhJHY8gl7vCu|jk~Xw&O90ofAaXZy%VZ0X=h6`iP2m@@XIZ~ zJpxtS2VO8j{d7Irqy4>h7BPQ@a-#2{edbiUY@R<;tj{{)E_UDU@PWWK_ChcCs(LB1 zDXV@rn?2p=AtJjMf#$+tFzAw_xe;g=&qLKeNpQ)d<1;$Bv3JC|KktEQjEHL7*qObG z=7|Bj^cFu2qC-VPspi+R@574`cJjaE(0fv*!B%K-LPyV^O=sYJH~wv;@wNZBI`0}5 z$O~3g;Ht!{kgb7N;6u?=w|m^mnr+Q|n%4$aqXM}toh0Xyi&EOanEZ3u5tHz{ zW&`#$#c4aOjJS1Z`aAnYYSOGupW$MEs|}_cq7#!ZS2lST+cg$f4J_XZynRNTmI$NYeD^Pw01%F&EV%G zM|^!#uz}l`+uyG(X5VsD?o}2nmTAIp`6GH3E%4!oIWrzgBBpjbhk^wz1}o&=)1u`m zeQvU8ADv85ZE(nCq{7O3VUlIg&o11#q|W3iaq6oxR-33e#C{Yhm{zMhR{^-w0Zz=AD=#v&82R2p$sC^#+-yO3E;@6UQcu?W* ziUu9rhmA# zLCu+rOPI9~9djL-9<-$UiMbXyz+^SMyQXW=56mdN;bO{Y+9jn4zb(5osdhpy^%tuy zdJeXcYJ;_IpLu6?y{lqQG-qW_I~#;stN}`HQv&zqBqC$Jn_abB_ffRpM|z$%tx1Iel@g`GSlHd<%zg!2fl2%E7R4Ltcl^} z$01q`U^g#w`7wLGYWnuH;t+%cuAqspcqwg4%<4SG4{dj$GdhwCTM&&p7Pf+2ZF`b8 
zcHOK;T3=PDoDy&#Ls7^G=5MrKX)(YHnF>MjN*)sc$4`jX6S;^*Ns&Ut5$k66N~{MW~e*UmJ6N6eGEGrtEap(&GWIo z92pZREV4CEkJ2ff%u6Cfel`Eq)miGtf`Ag%;F)4JBSGM?)3SgRnq!@mM-0def3lno zO>s<_a2iejv)QQ4XUf7yXUcnQh*x9MvA~~%CFswmHZBuEn&D=UMY#k;z8qP{rIBpGaf^P<87zFXy*a21 zsRf5%*o!&+@Ee#Kl_*<4Ty)hcy%iC&l}<3ib5QR2u^vkszlHPx>)~Zxz9V=yd>$2yEBx%ueJB?S-)N3g^VjDNZc0lGgj%#jVI3e z7~CUMH~E{bo#@PSB{VonID{#=_0{tNY>fzMc9C2cf3Y4bWb&txOF#<>{v?+9pDexsv{xneY`R{v>XU-mY2vt&L*TF-!LcVnuvzZ0-u(zajsbn(~?9XaS;JP|l{ z-D=|U7nRW}#|ZQ^AT~30eRAY{gQ*WLXtSe1_|e^Z!x$`_QH|PqDjZ$6eG}=Nr=h|+ z{Jn+~@dd#M9@1pP!-XtAy!abJ4N@#QYaTp`?z9d!^9X`C@6O(Dux88 zKX+SPUYUbEnXsbBtD)q;t@at@{El(o93UQKOW+5+6sAJm!<=4U8`^O=-Y{J4t5(lu z3>+2QX6#r22+gF5w!1bl3Yv;!&fDKa*l$WyWnN~M57ifg>u+zb(vCH%8;{|v3?6vmey5&v=EE;-p*1$+(D3qOupELFFF9&*tZm^r zU)*TbqpyZ*-?8H<)28Z7ZouJm14&HafTd$H-U7}J4jRsw*fu$hUmM_ZrUrr`@bp-< zPBYm-D#LYsxu}|>3CSRxreRuKd@ep0HM1r4B!mQFV_d1})RKaqQ~OTjI~ev*s0fMQ z&-~5K9W%KIA^s-8!#Z9;lRUJ)jp`@Q>+z)K6C2+_eZkf=@ekpNS3xQl!G;9H-j9ce z(j9!CBX(c9Ubn^9e7C{M-dK;X*Fj_bVr-$8a8=Cnq>TWB2kz2%tMn;c>6utm*9C=a zawxw@kEVG-q&bx<46(+zV_ZW?6MJyPHMzMrKiIv)h!fuY`jsj91SO$%-Lu!U_?cp0 z18>+B5}xbhj&5v&rsB4a=1=6Rp!uSe*CX_>$)%`)^wortkV!M5P5^%^Op=msxRmpt zCV~a=>a6*j&cbHhjVsRTC8%b*>E75fo^67q;dcmH*j|+j^G}7?D8}q_jY?=(iLowb zgt=maDc*!~N|romGG6c`jQLQtrbWwVy5obPd}6}jfn=>gViYvLwzgr-o;31?_U<(C z#`~F;=XDt!st2bB5l4=xa)Vf0U%@qI3i|^C@sbK2tLzvI{mc4ky}B!Hf=Sr3)~@0M z7kxpOw--p%XvMo?>)$W^4LvtcMRRB=0WEkaIhziFka(4+8S`!^R{id3YOjx=f(dUQjJENL+y#JU@I85-W7)1pMXio!-TlNjR&S;;GwD8CIBZ z;_OA*`{sxS>@}VtH1cI~xrc`e!X=bE=e;=S18A#<(SB19scDqekY$_@)a|J0ZDq1X z25C!Wer~3v3&a}sFCRe7$(tt&m1tVY_oYkdd=uMftedqtg;g1FvB42MyFL;wr`t#a zRj(R9U*RxWM>(CK4|qd^b7KRNaa^H%JUbmkVhm~^{NaIyAfn3G2On&G#v{0FJl@^i zJD140hEi?~Zt|noi^7tHGLzB~#0-25aAD}TMqM7?}8F#~zEIlGS(G*R~8J7Zz*R)g#;Awvc z=*E)O&0mdn6lXwf($LOF%L$iv1C`E!psTiZN`|f>9}rTQHg;z-Kp@ZtmR5-drkiJl zpn8hzFY)3HdNj{Gtv@n`@MI#Lh6dc74Pj?~~HxhH4LU;6*%XLp*A z8#QUR+s8NvZYV)(MXjSZr4Ql5ANJ~sKmHUX#LBGC+#qvzzLQW^|fmkxg zI~!2%44`p)h>hV|bxWh^v?-ZY4lBBECxmZKa+y$eBK-O8;*5zCYpOjzXD(HN5G(3wHkNbXTs-x=j#fb4smTq5g*uNwr+j> zS)XI~Cnys>_LLD)j-v~*bL*yg6&>OF&Si))xyp!-um!cln0kFrO1Qr|%ob$yl|Nnp( z1SF0B-F0IDWMi|ivjd<64uEBa>mTdJ0zh^-xw*Ii>-HD2``6$8JtF7dB?dYFKE=Wc zaQ$!r_;CQ^1(?G>>wy)ZpkQNW2h59|^FM0j-<#LJC>8)1_&<|IoPVEA0dP=%!8Ysw zBnrTM{qvzP17gDgvGQC1*~(wg*T0_1KgD0>e-Rrd?to-5ZW{~t|B@BX`S)QH=D)Z( zCV)qP?VsuLfQCV${@<;Tf34*I3d*o^FxLG~97@;98~|ZDI{rVI z41j^)Z*Ktrq{GC?4oKo>^g(Rw zfZHOh0QTuWc0T|wyd& zzyZSofQbM3lvn|(7G^*c9}5u+!2I;Db@|^77X05P2my2byQ6331YCgv@Zo^b10oRr zc|88Z6auI_KvMwFcmJoY;QsGFaRZ!`%>VR&{dHUc?vQXYGcf_~ zrZIEEnMy8of1ZNhKV%J-;IE&0trvqNbv{ zfFi*=JX@Y65G~>JOuuiVTbW_D(1Cuw#1N^Iz1@J_PfpO>JrrZ&OxI;pb&WhmGM)}k ze))*q#byjl_I_Rsetqu35b|q(_IimkzV?1Sm8Kr_zK`~L|2dMoJ=*#F1?jD$;Xw>@ zdnE9Aw^vK}N!Oc~>tHzgTF6kx|8nB%_VQLq_-k(XvO6^w?r^L%<6x`R-eD?BBZV*z z@QRYV33FmH^UJD7@GClX$2Ho(-CoJaD8_;2wPpj6@%rOoBUHDi=mDi+GE{)E_u~;% zJLR`4!_OoEG+m#!%&AXTf9)l?`AOTakE7@9()ZBRozD}2p9=f}f3Oe{KmTlGqbFZYpWxqz_7YXP zy{B@j%^}`T^*K+C_G5bumi*rQd|~6N?K!ECSCv;yI`paE9PjWtg`wQl z+f}%Z?m66^LLuc{h>Ld_O`{JS+#My0wIHT*^Xs+A3|-wZnCH5}!XG@4Bkt3UvQ!ac zbdlzijjU7=|8Pwn8cY)_ex3$pyJYLgx z@=~VdDFK!v6U9hWROrcca1}9PIZv@$N0swnbWg9PUUs81KhaEdz^^yGtu~F$?@E_;fytH-u^K~6kV_fsP-mr zoFMC&{lU~6_IK=Rrkahv))-i(o^95kUv~xG0F_@oW{T9XsEt|0R`h9h1!v7k-GSM< z9vIH=OD=m*x=_N&BA96&<}#8z>yWE#`|onA>M`G2C`W(3F+AE^P0PphIDjIcwb;aW zPqP0uh{y;9;<>1w=oGU4SlTm6OZ|wXz2kuNEv67)Kop9vB^p?@o0V-OOL#=TJ!oyL zO8BE(47R)K_h~bsqmtq0whQBI#t_(Uc|crc-){{AJSosxo!^+3%&Mc)+hIxW=%5og z{~_$-s~n@bui1&OW*`qtkQULcm0LG%Rw7MiFJK3Ph36{$sg#QrzcWbyZ&1bzxf5H> zPfT<@3_R1`3wVv2rfvIO%VnABH3+v`2qJwX&RuNe=%dYZr@2zcw_htk{~5f=T>MEY znIle9VR23 
[git binary patch data (base85-encoded payload) — unreadable binary diff content omitted]
z-Q5Z9?(V@YxVy_~@@#qDv%hzr@88MD$mrFps=8{<>Lygzb>F3n`}?&uaf-53PS+Zx zEtTPr(b2G+kB*Kb;VUbItj~CN-&Wt-lK5`ZvuVi)u9QgqAV_wgLT5`DVLU3`I=FP- zzBU>ALDt&7@t_oxzF}KbI9)_pJi@TEscFw8c{$_g(uznK8}sm%zrBNtmv&Qqpt8dm ziTgx3CFo`+L~X&wDNlg!XkVi#ul&?3=cM@dRQ}U>Z8`QgVU-_y44bSOR_Huo$8K_Ty+7-5Ktp&xZ6w>P zdK^b59$KQIy3x9<&bv)%IjsImV>&5$Mg+NzkLBlGWG%a~xYl87OOnKw<{G-L1x@vq z6%`W)^`$S{3zpH;_?Ht1@ux)Ys7z)^7kKz?zq}Ma@>#~E5SZiRm}SRjmyLqQ8x@Ul zv2GnD4<1;W@9mnI@8z@!``(#&E0XeHyUEL&^=8KmY}ruVy;w?A$C)Tmg^7O=E-l@Q zBc`-TTDSdFOT{XgEiU~_WhWoo^hkAkvPukgxpocmlBN+W#XmLGpW6nDe$nkMOVUS4 z^`NfA8c?Sb9@n)Q%g9lli1=q8^~97fYH7yqa)}ns6txR^PP+x=c-{{(;~>H1^4+Ga10N309~*K#)b?-jX2o(C(MmtK)NBu9#-(|>wZ)Z7RTyK&#YtqvnF!PE zS%$JvZAkg=^@!L9U0-{~|GIyG$exUeF=fIUXP?{2;r&Crx67=BDbCsg3}afIsVVt8}=8cJ}<7= zeNM$oRCR-;cG><*J7YcKjKO3o_aR~ltUA}$rO6tnvb*`ZFN9?kkNQi_EeBS$p1XXJ zw40v3(7!U@)enP&^IulCCRYI)1gag>dt%rzsPO;P6Jh)V*8h*ix>wQxSeP@k1NJ0Z z+xo%fd3fHUv>UHCX&BK4J<$k z>f0OtAv^xt*c2TN99(RG@qZE*dM*5_1;7N1LR?76T2x#}THgkcQ!xStYVP3j=d_}W zJup^rD--Ki@f09N=JpPDE+iBJfU=1(B?7&yosqGfxs@pi#XqY7ClwuSY%GnxyvBdE`bq1`6GasZK*m8=E%OJ#WX=xsd+7U>xmDY1>Kf z!=%MCDep04(0BC?>8Uyg!uq%mpbrM9B&uPO|kNM=DU7I6oDj-1{DW~_@ok>>Du z#vYbx_V%On*w!r8Gu5)GxqXu;PYi{5JFt--M^XOsx%9f*`u}hr|LazKy$6A3#sAg) zD{N(GZS;D({p&8J75nGI$w(h~n7lsEfR`M9iHiU;Ovzf=${eU;4BTITxTL>NwpR&- zfAs91uO^t7Ss4C5UQIAEa)5x_ngn$^$mm$9ApL4s4u^9{$f`W?o(S7e2)zeXo6xOiaQ@jqN=ix*eM&0S&aKVV z+WpQly@NjgLcZ^wf72FR^P^7@wNc<#fHr8DrkHn9@Vo!2sELQ3yGb_cP^g z8_Va}H&4EsM5?1BjRTzXR= zC#1>H&Ue6jo5dAH!*(a@bU6iAv{5&x$RiS>`yT8$aZuPvhGNj7`*7~C7~+O}ExcV6 z^T%wYMZ6G4vu-ZV`!;#FuAdkAQ?_hMxjMgtHFs|bKHG$!?i4^lXG*;vCwhDYQxw7@ zMY|_x^3g)yb-#LCgCBW60BdvL$tSg+8Dc*s&$WioBxu>2?eqi~uJ$GZXH&;VnD< zFO1l%o&wYnfq1ZUUNZ&_|A1_73h!!BYZ6#jyonm5n1F2lN=S8f>hoq5iQu`=m%SOb zW*?KF)Hb{B!@ONN2mRd?y*`%8OHL#7WE*859Q6p$H;gh5-Vp^x1#WlFXyGstmARyS zhBn=^aiY>nhZ@tAb8_yYNLYn%NP!B3HaX{JO}*;nmt0STzA~GhJf0dxj&aiCF%KYw zd2R_%4WZ&W zIqt5Bcoju+Wx--DRtGG`uuB1MXjINY%+ zKe2E?m{fWYT%T8_%7*%>`fm3Q$>wT0Te-8e`jGG!I>VDSY0Mk651pM6xy-Hlq)TZJ zwd^HTxqcN`w9#nR=8h}Oc{pU|78^f)pZMB}X0hIz(0Qm9LU=Vc=+DVK^jUTUxtN16 zIyH5&b9BnwnN3=sO^Ts|rvr)&ocqL5A3N!$dahwNw1m9!Fr>+EUa`q2P`xyc#SM@Z zZ>f|_VC8D^*Dmp%Af1X9C^omkPOh!6*uuRxomcESq-d&;JDxAO?(z56_xJM7*DX9I z?kG6+SK5Nk;9o#%X7^oIx2y`7Kx8W(K|nQeGS~mbdNR*Pjl^FK@m*)+C~Qh&t|fUf zng91xGdB#j8p4CAa*mICIA@kHbfP+D0ppFf&ew!xm}pIe@Ll;4*5F)gd$-RWXoz?z z$}3Qg@WuSn^(T!+b$a(*h?v>*XsfvvQO?#dq#U~2yz~=1rN4l?cnuO|RS>`I zPWYT_jVnZi6yH(87SV;t5m5hx3ICD)=21B*?X<+9p(2qpF z_Ouy?lnxCP0=srA_b4}o7tWD1w1Bm+xA3dF&T+nR$C2Ej%{kXi`m*S>@nCt%Eu?PB zv|BEChv*%Ve&3j5A%L=+e|2*ChAyu;dyqOCiwWZ8fjEt z)arEDHNxj*;>`U?^t*Nc=?`MTbO&O3e;woy1Wn>=dFO-Ejnm-oIp3utxCcpBLY?_G zioZ7@x#OwTZ`>QQDEJ`U5|3NvA(UllJg^A&4 z3|$F|c)m+QHs*^Vp!k&eUT_}Sm?LII?mQ#3m(RP5BCy|YlsTz~`a4&MYvoOi!1^Pk z(#Q8%^Md3`z1W`;p`P~4(rn;jBe8ZJJs~78oa zI?@G7{n(Xg66EVYLXi?gU~xylM2S_Wt>Xnb96oUwZ}p=D>|kdY@FMdJ(0}T>MWzT$ z5k6X9u<_)k>gCI3y>PS*k&-V`Q`<`RrKlh%53!;izh zb4<@?^uA45foaTmGEiDivXIuBD%4i5a7>-e@=is8B5eGv-m96_uHD8htG2Iot4)hX zWy!3D$AWiZv*S88%eJ(Fb~>012pI&v3MwZ{d`*k#YCL@xW#7PNW;vgTHy0cOYgS2; z_8v`{pGx*y?&1wksM2jcay@HD=X8}@boZMM((;dKo)7o)T(_I=PHQ;HT5YwRJWlR4 zbUysr5)Cafax6Ruuz;K%s~^<!72~eTThhWml}ZT$)V;w{)gsi|~hD#}1E?W2*xjpMyp%$hvQv z_bJ-v#mo>k_NlH=F({>h2V72WK@)3`Yr(%VJmCOuUHGl3`yJ&_yn=m$X`G;b`~oYX zFcP-!-k#{w1U3A$@-#y+bs}dJ;57nY1Q2T>m*WE(vmC|)TE@`}i1#Cy1o)6eWU~lZ zFv{DZomtH|sD~*#HC(#ZQS7JYk!+v@EsU%`L){pzkrdz(K&{NaOcvq^)46&SF& z?Q~o8s2U)6GXgrS+!gFm5g)v4ZWv%A8p{?5YuPq(QsqKijs68L4H7AGY^hM=^6~t#~iLeP|*7T5!=MHGzq?Q zY-8rV(E>&t{BDc^Fx|k*c;U~0>ygNBm*}fFH@;24;r;0Rjou`)US~ zs~^ta<_?|C+zTZ=QikqC~VjKph9+ zK-bgQfS^sRU=s{QB$J2Voj+X 
zKS(;qT#m3mJvGD;l|EdW*y5McJV%SRCgv{$`&ut&Z`Ia>1FS5(6($7udLin0h-D;vtrrD+gKW$4P-bQU` z!X`?2XVFq|P7vT=ta4H^GQQ)KU|IKjkJw}=zf#db_zou7{)SHTtaIgqj;D18AhJDw?t@S0A zy7Kpxl`X;stBlz&m%Ci9O55cTXLpxe*EqkvJgcr_dtE?;o&kd@K_X|S=cEVdF2did%p8G2M8V*m7qy`3ut9DZyFCK%P{*&aEcV&RizSwRUwpK zApxhf4h#z5h&~t&f`p&wevb?J z79B1~9GMAE{Cqe)zR!|Rq*6k(m!^$5Imd%8EL!J~_Y2lDZl4BkdqcfIZ}st{r#Z{; zJZ$L>`fXPRPZw$l=gf}^3{sU-zG&s>89ipN9K4MIx@Pqx8>J*K5b>5fiUj=avTuE8 zC-yt-F8E!=iuDeMGCQ4uROoHt7$`>ZfuOUB3~Kwp^YXB$GDl67YNzFdi&F=(x8m*t z`fFlj5Jho@AGq|KSb#H)-vP1a0ldU3*3CuT2ip>@5=Mu^(rR#;q(JBXK>m6`2P`>RHRxNkqALytQtd10=&d z3YZR>4s3Ef>8Foi44qxuZ{TB$@8;}e-g*0Ft9Dq6MZ4;%?FJ+`ZYFjm-FyEM6{YH8KVuh0Co=ut%Qu}a4?lNHU)=$DbCS-%jW zAvLk?GUnUu?vm39+tK1&z)rW$|)ycM8OE2q!1GXFT zcI>dUsPzlzAMQM-{`};A8{zJW*8)GoZ_q|<4@I1l$l}tFKC!PJ7jL`hZWEDsxK#Vd zfKLn$2JRlME6Y*W;FNsk*2{`VJKHTGDR7beG_EO?PA=3pcRHA5oUCmxL6u^yU4J+2 z$1$WOhDY5HZlzOyzL$z0Tt91M%qvoy-|~1x{(`q{Mg+L^>?d0U=3`^gYbx=hhTx&5 zgL5B1>!?P!I24@nL{q!9pT1i3-$qiaRToLb-~@BV?Mdf)9xJ9CD65?F3EPN!wV|Kyz_H+=TBM@zrP4Mh`RU75s4*jB3ejb)W{}^JVy1Gq= zxO_~E=2Pb*G7PzJ%UD<^xVf)mh%RMzK0G=XZl@7CDl~uw8HMR zL6eO}vr)d0!(}Gef*Rq@Z{Xp#MhoM^2z^-_SZO5=Ju{y2v zzO7`ZwST_yxdQD-T1%{wi~Hs@bjmz0KHMepAg{95ewh-FM=XUJVoyAQxF5ge>cp$C z`(`s$IHu4wnK|9u(f?7e9Jak52j_wVMEHq&&zGAlk;9P8TSU_q{stb{qvj7I0=eU> z3Pv^g;;nKe0GKdNh^CyuZqHm6EUi+a$VXZk>k1EhZLnZ6JbS@nJy8>!|QF*7x2gqbas-&)I$J;i`Eamh%ms zqV5kmV6blP(6E>i!VjQJ;IbeqF7YijzTHQ0G6_wq(YTl44B;wSdgno@yiwq<87!HD z@AIUzH8*K88{CuMN@6^^8oaGPa_t|zEd_(bhJQJ1yyj;UqZ4sBbE||Zc|Y~Na;X5T z9#t9p4Rb8>iito#HxwB=M!#hU%tsPbb=*W>M-HjR=j`|5*5zF~ux(u(+tKgbK*SBs zy+3#O*;`MKAX6fugI7D?>BUCNb>}IcF7-xGCY=tDJBkz~(!nLB-l#XVWiaQaMnKSH zlvrykoQp8yK(>1+wGPYx*Rx|V`(K&dtOJrGTorauC%fg58c#9c3i;NXs>Lu*ana!$w z`6U=rM5bi!-z9XIhfO)#L^uZEFodp$^m8kjWWy*EJW#%BJGpE()T@qhDt6SVltvip z+gkj(q8E}EUb1Bn=|WlJFW+T}j=Z_>`WKo64l~h_R;>B$kncd>bENZOJTfUG0}LtwJm(Q?>hcrW&Mt%LI7kVXJj1ZaI~rZ*b^i^_E7>h z=(*c(sfB$#Rk)rj?ewW{j_Qe~!DKa-H^L%h%4yI6quP?TZ>B1)2sJyPC#G1cifXN7 z-+<7`fqiHEp>fR7C@EV=^huNO%E6Icw_$B~B7YM`grysCA$qi6P{Qu$eaSIH$FY0T z=G2S_VbMGdTkvfN6mBv#(O6lDU*@S2>FvfzXCYs3XM&C07BtEA|y@934py!kuX6}GhGo-#E>I>8mdS?>^Wt5hEg@j!h=o{r2BPSYR9kV$)=p1vl^I|uD$`Mb}XH*Kqtc9)+Fho8K9TfZa$C`cl zhrNvN#<-?fzMv!nHNP@_11l*k2a8;{>-d~i(2uLRhg{z|x1*pU!$5jFVif|9crs&R zN%EW6^;&E@zV--0if9>kA%;Nd$k&QZ&T{kM+LoMeyY?rL;WV?MUXNzVCs*KB%js z_t0*%x|&RNIJq&3k0*gZlBPgdn_R$S$``UmvF$PjlY4`-&k*f9RT+~i@Wr#{-cFLR z@iUY(*_nKG4Vd}aU_*lL$Ch8T*#WIzGpx7V`gEK6wY!-qJosW(h;NUVRzq1?R_Ie7 z;kw48%-wbGJEebn@c*t$3{9Az2t*z3^zhzJeHo>`=ypMFIgH(#(MbfMqYi=uP?BjR zoaG^Z5?zm{rTn-~@78?|cf^ld_r^|9w2=~@XV%P>6|>$c_IV14Kpd_6YIXe%1RB+? 
ziL?47imSJHt`1{Z4cWs|7D*EO?lfn!LuV?KOYI9Ebm&Cp@T6!lS(IE0PbI=Wr!5we zpjqXIexr+SLhd2ub_Ev<8$lM_+dfAE8^?>%59Bvr++1(4xX)VTCy%T_k{NL;u2u(C z$pibBH+8KJ9=%QW@z&}LtPh1hWqJ5&9<%p$uxMA=UVbNfsZy&&(hkdpm%?TyeE?HZ zDieqetku)06C05gqrLsx0lPr=f6 zvFsp0#*Cq@?@R3`eNhLW9|`whIGzWY$69FSinHm&ZmyuB=^N;avYiOsjj?Yi9}pqC zxzv}Id%al}@NkrnGbO-h3po5Z@>cN}VfoT9)_I88tq+|%JtlhMSI2%heuf>t;bS1E zI*>=DGA?>6zfJ2rw?NGKEVXK{qf`}a_j?GheKq9`NkTLv1py--!kcHRX82m}kWYA` zo-PUt9&(LFY?9W*G1p2PPE@+v$I2E*`StiW%(o(n`MkF=>UrM$K`v4cPRg75FN2*N z&fPh0HiQLimC4PF8oll$bAD#S*KKj~)eo{3w01qoYc$5`Hd-L%j=1g-oMB-@sVQmd$lNm?%3MP9J?b@EWp~p{wx*hRrW**N zO6je)B7Obl@d0ONtnlhspkyR(6Z9eZIT{v$3AK}&%^98({l}+F+$oV#ZqBF662tgg zvjMkpnMKaMT--QMYF%n7E;haH%ZWW%%K^^o=FK`v$u<+4Itx?SupHN~%RJ3T?UU}JD^=<_)y8=YkwWq%ZMXyD;IAMZLIHH2Vq= zS!53e#?{rsIp%3LDC7;LL&HC|luOj5-&+m@f>PO@XF4OH&!!M^s9iL}a=dFvP&K~= zx%R4)F2|&_+@y>;(;SdGJvShET6~pz8Ydd(q)!6NW<-7=u}4~EkfM=LK~bi0rI6m8f zo#=MF5(-!&N<55R8`$zE{<<;;A=cnLmf$R8SXUhWk0EKuA*B$@gn6|_&x}=%<%z;e z`xQ(22B5;$U^#o`=`O9FPc5i23qNnYg3ms$>n*CuECeCko@CI{u9Pv;#sBDg>JD?{p zF_#WBbD&3s&X0$i*FB2%@{QHNKX3H<(u=)RHb;NDyvnxs9SV$nP$Jqu*qE;;q2lwL zoNH!V=g7#~NJ&}SRJ1V)2`7ZD%a_{Ye0SA7e=0(ZA>^3Ep*C8!Adl(l#}r_!k{7zm zhNl?2jGOAnuwd7 zi1q&MRHJ@|T<8!y>w9>)a@o~K7#FZw3g41 zPexdKzq6UCs6^Ju%OpRY_NksOH%!{0DWfEZiAo-&`hy+GfVp^m-#3z?af};GW%Ta( z@xoRM$KAp)oHlHGir|(NRUJ5^ZAPf1PL9gyohF0h(3x)O{H7n zBNUaai_ZO+#Q!B{#7og{ey43e{_>oLTcaeQLQl*4X2KeUPp2b)n9?(M%*H&n3YUY= z@LutEYU#kMQ*!-B5~1U|Ba#Czo>b%$i~0U+jse1|AJ zcbD_a>R_hj>blkYA_LQo*5PMZm6*3(#FMayDc|7IhbiNDBabi2br&|>E6zS`dx&*4 za9K%x{B_3ryb#Wyf>Kfi@tJRvnEl-VOYGGTbh`_OEhXA|S5a9HUHQfhe*yKevL)dx z?fQYJftzf2KG?dVS)!KeG#p9ZD2@b6Z~}MvM&Fx@@^!tBc2^Q6VIHR{#&d+6);Bk^ zX6iXO{v$?`Q!0U6sstMWhtdc;Y@fffqor5sr4q*y?Y9v6I!%Ro$wZ>b(KecD#`zjP z6d!UnCP#W2%@prtOL_Vq9L-WSLKkO#K0?pJpS7%2*n!%#jywMafTq!{9+@XszM`$X zjYgXZkMMJs3NI&AacRja?=ZG@=sF{L9_q$YB7bi5pM|yL8Rhb=Uyn*x89XAI2G!gh!iiH5`sHvd8+6%HyJKesT8-M zuPbh~+W>zb4fnL@VkeqYbHPD2x$X4Q{GD>6?gqg`L)F-J`-s1h6MWtaVF6gdH_S`_ zG|y#boS8cuOueflVl0GyO&KwdGy$^h{_zsXEvYr7va_5J`nCY-QSE&V{q*|Xqp6(9 z#Ont-y0s46+7}#KQSc;JdnBsaVthu99~pzJYw177k|!ToCsRPgHmO7@k*to)etszH z)38WRaKozwrkY31Ei8EUoJE4y=5xWnGfY7R7mNPz{6w!1tq;z?qb-EYRB#CbTl_NoC;*BJF>ERPzwo=h-fzExj6FD)>0r zc`=u6aZRFd?|-p8e1pgAZyhvOX3^b(P*HF}=XiJaEzU{&oqe$`31muOy&tV6Lj8{~ zL%aAW5^ZaA5lgh9Q#AEMdD4^g8@8OTxH@C2>sdc=9c^*c=&)w+uGhjXKV|TR_(Li; zO{^JcyIhwUxP7ver-fIR!pqC^8Rjn+dLI2bvy2sgWwt*|ml2`hqaQ#?9)5-IXJ``JzU@SdI ze6^8O4EG*0-Y-7jLBKmqTh6$xRk);pZ9Q1H+{)W-?V5 zAHzcOLeuA*2%o;`8qv-pql#o31PW(fD5F*$Sqj8t3O;TgSqA232^T=wa)isk$#NY= zkjm2>{rvI;>}MX9L14V@_Xo{y)dt0OB#XbczyC0OGZC-(eUfduo_R3!%g3$0#%@0x z{8{7c-PuBm#kAb95#5%m1HKV0RHRxcMlM?tho*%B&>t3Ob+ zNSC%}m$@T`)&^7mlbY8V`pT6))^69P6*G+thJ?0~vzn9Mk!W5p6`4>H4bCCHRDC+Z zQjg^4svp5|KOAGmVipoKb8nij%f>B93i7D?vI}=dFysK&8OEZnu&t&Q@(m|Z2dtxJ z*@;@n%|?VqqYa${IaVnTHPcf7uGC*#fHD% z;)yGV6QCy0;E>FpSsYP*kzesS!um#`m77Yk3-xrujK`A1`E9NJ!FrqUyB5Qjug9k8 zk;kGDcCZp@?nmx{siovH1R4rFV3+TQ+HF=LMFh+WLVeIk7&KzyB)2sApbE}1J<#!t zMPfape?BM=Sf#y53GY28<=X810 zlA~o{XhOy?>cu-{Yk(=qm*;oYio1B3>=u+{=1*a8O7o(SDBY$fHY11OWUMv> z<^^T~Yf%aMg$E=-`jc`Z z1hwwx_l$!q*N0&8k(~&H#jt(7!Ri`u5%!&oVGTp+qKE$8L><)xzH{H)%(%RE}_&f3XP7^6Ee-& zW+N&J8udQ(8l%6fO?6$&$EL)fra4KoXe-ekRyDOAN3&_^yfK9~aY~Grc{V#Fw&YDw z8hjRN-1=J*xk{HlA&X4nLCRP*3w&bwC_@vg65@Q!Jb6BbR6jpiS@}~a#;1an;Ab0V zv*a}dhJF#@CJ~MjSHI)c5ebU?51Da>*1~xKq6{$AY}3KuBfwF7fbxW-e4an;7oKi( z8sh$vq3u9gto=m9f+|tMvqVo+gLG<4O6@z@^eBpq*G~cC}nhhq`9H(h#mC{*$pR(u+c!bWzowSg2W`j9)cVAf7-rR zGuNc#W5t;7jSTo1$Y<@GpIQ7a-hQr4->6HC-L@WY!#vO|VlzHDwoLoeHGD_klH90Y zyeNg4>3FPL;>f_6lfp=8%|NxGLmYjPGbKlGAO-$6r(BW^b<0wA{ZX 
zIGW3G@E}HlC_Q$`YTmEGo}8PFl*VPYUcD+O#xEwuuR8|gQPDtai0w<76b$d~@qM%< z#Bi9GOL*r;m(wzFgSX#kC84^BuE`_c$9bils#-8)ZL3#Ey`ySga@#26WZr|8HaN&h zn5}8*Jv%WsG%0nl8rV-wVc~JPIL{AYJ6dejE+=BIv0v|OaMPKa6Tl<>zkqN_Qq2Lzbz>YS5sosGK>u>LCDa6K zdVHt2u&LrYp$ftsNc~~(b%s!m{FGvo1Cb23G7RS(M9Z?eSbG;i2sTyS1PhSw3pYNx zg6U9H1HxPv(O%k5E_G41pA#n(t6EjDq<-NK-}-3FGX8VRKI0!B&i~l7|8MN(FAV1| zlt}O|lt{taz}mt3zhFdwVjPH>h53KLM6aqt09OLo%?Ln7AfPx0z?}eoe=O{DEI>_wHv%N30P={L<$qE~Y=2x7 z|IP%w%Etc3!2b`_(JR&PKd7VsB@F+<9R7bGj#vOSMP^nGW`N)Us32xQ@sOQ~1@Ni^ z*djU*5aw6uCJ-|d9Saj9V8_4)Xq^MPV1ThCBOs{A{3n>N63-w200V#>7C`WimFOAT~w-g97NIzs3w$(K7ut;Q#^(kWzfTE`VJ9@iPI^&IDql z;{d(JfE5s01S}Z<7gRPTHaZSgX5iYa%aWoS+W+(UUr_~4(lPzjn^)~tAb>z$|2Y-efSJm~%mBC$vjf$b7(joVJYS8rU-ip? zkplq)ydRhW$O=f$-;Y^d6(K>dsRBv^seR1=zA+j`zw~ON%%(vAb!BOftGQb3k9Duoi+_EwNSq9R`^0#l<0j~{~ zzgqrR)L4Q04S>x6U-5r8{I4W11I+eVBnDv#~L}_7`XZpwNIz0bK=3 zv66s*Gk-PD!U2?KVEtp)3`{4M*U9$pb3j~x@dHx=NGSslen5ZrwetYn2E2CB1g0t7 zpLzN!bgBcj!E7JzhZLGjIAYifrF=GRi8D9?pQ0*V3 zwf|z(USYm}x-|Z|dH(UB_^+fI3*+BeA?>I!fK+?ad%`UozL-f0pPTzukm^e$`(Z@y zH5dyc;Ut`V2JDJw&~nY>50d-4s?@RfXPLaSdFkqp$09TB!;LFuuv5;C2y?h}EkS-p zjj9)6r^NxYB0uS^;>-gw*_Ls(I1JAMRrBX*56#Q_n7KvIlBd>O?e%4K1AFc5Yp!8o z7?^*{%QGOQo?K2Y{Q3%pWjJ3nt8=nryk$Xzn;ruFF%r5#K09Eb7D3B`4s~mZ4|<6D z1p^J|Np`GYf2r_f*58uSoo#2}C%?pZsvK5xN^^@zg-JF7b2pMGp?rQ)M8~KQKZ*|v zndyt8PQ)mS_?8`Y^jC2zniAFx~;6|x9s#PPUj1A7dcUp9Kj+!f*zh#6i8vT2k2TAfMN+*X@f z%JrW=zWydT_m|o8D;D;OW&Q*H{14*ne|dKQU(;rPw8sBmwAmkByuVMFzZq8jlQv^y zWM=>%+W$6x0F`q9ko+5LrtadbBwWjq{~k- z%1-3V;M*;UCr=lx5tTbW}t=ttL(2yD$W)e2qp1g;z(! zLtvLTkYZh;@hfgSAU99lK#%9=!S2bFC1VbhwvY?v$TUU^eMjF*u3F^1Au&Eii#sN+ zgZ}3?Uhdy7bLNrkvFM)4r?J5?++{g;l<2>J(W+IX`=4a*OqZq*K3M(4Ch>&hFTb$2 zz+$@^7;S;dP_`Xiw6`d>C_8@0zoQ#(f^0w!$o^K9YnZ6{$~)M#4tbZ?u7ZhNZY1QU?H!)34Cew{F#1m_{-8rAhV!h`v5ES6JocA z2%2mVH^2K6eI>EfpzU{MUSV~Ji0=IXtQd^VBRe9ycs#S*8+|suIFSk>nb0(LTb6L6 zP6=ptuqxI?$~o`H&^AM8=PbgI_YR9(_3u^WcF{J-tG&M9VL{-k!M*7e)Puu<=&C@a z=oQpsSqi&i$BDQSN(gntmFjZk#1iC`%Lf;6p|z#`9_=zDX9zZ+*BxnAk9pq(hXeni zUX(+r%Q68LsRBYJ>j>O9$ejyuyAwjKbM<~~wLs|emg_T^YxSI`o^W=rbU>0ib^LTk zD2L={?Do*ced7=kv;bd|?r8(A&4?5h+ynV&k-5y5+o0bSX$)C!4#d&>40&88)!ai` zclxx&EBy8Ff22d3PYW(LbIkmp+&7Z%gTDIYb0PLCqWn072n|O#&|h%E?tmf(eevyd zg&!LQd~!4aTR!CC*=9ikv(BM&-I}N`Z*ZI*=b>uWM@SDJ3lrD}cMr30zeb8T*XnAz z0S-A39?)($_8CMb8`ikbmFskt#hJ)oI1rz*u*TGbpIp`ah!R5B2;p4Td@_CTyke*g zT6hdRocY{m#U5GPC6?TXh6UQxW@{Gt55s!vuGn<~^DmyT*7u+1@V&_#=VbOZYLw(^ z=InUm4Yhrl1;UQ=1^G@Ap_T zr!%tg`QK!z{E~tj4tbuQVzR(F>FLJJc0@SoK^__rV>?UBG!pW{K26AC>91|M^>#TE zPK(kpO56#Ds+?BrDGqdwjrKA`cTpZIR?%tcrsgYGKo=v^o#DGlERnhB9^e%J0~8Ri{%IjzV{66)K}JWycR&R)TD*1RtMkKFbGdk}E5l6u%#` zqEA!JiNc+y8c`6}6|JQ*o);}tSSS$R8bxpmY1t}E!g`>1SlY*n%?-lRovu~doKvQt zO%J@PNyW#fC-fwUchWYYrDjIHWjP^N6jc}%mbX`m3N zr704#V?XTKKZ~-va+Z(E&LY)RZgj4OWudB{F)EZ;wN%ER%dFO-l~Tye%KgC4LHOBT zQc?j?0yQf_bTSbBl7Qk3F+UOp@yC%58`4!%k=?K<$oY^QB@}}yj_QVzLm`pEu$2X4 z3e3S(GX*rP=*hb@PB8O&)|9k-3g1@`O1d=W7#beojKr|_;&gP^DP?K_c@d;C7V=H6@VmruV;dtcIDn!!Li=WU{S!)k5dEsEm27;^}lzE7+T@O-sr(l3x_HNB)V}fP+^%S z`ijY*iG?G)$w$Yblh9)iQ(Dr?V7iX z1b26b;2s=;ySux)OK_)gcXxsWcXtQ`4ek=$=>&(ovAv1oGPk%HEYrR zJgZl)Dt^~<-`ABNQxBJOoRb~m>%{=cptdu&3%DfH_88QD_2d@x{B!NziAzwV>9gUk zRr?!xTpn6hZIAF5M1ylW!xu;R@xD2i!5i%YB`U2lebuR1*E!f@@wCY_(=TCu(>>|b z-4EEQ`@_1g!{@ihLDZ!NsyewZ;HEtB2W-=RDrME=aq6hN3cH=Ul*brf(u1?tKcA1~KR@!|1bze;R*U!rJk-F9=<7~}Cs&soUz z>n8Rtc+wLUF`z!#cHG5}yhMr!CxfRwBWCS1Wc|Pw7tNg-{QNd$#%69EK=-DrFc{aF zfhm94aYk_YWHq27ay-KhtR4jwi#i{H8gF;2-ZvSS~qtJPfB-!&a#- z(cB@d@%!aFQ9h;qH(ZvPRL9uTluwdsp7Zc-VQxLUa@x!xz^zo5*swSXH*jTxY zP(|_6<+J!an(LJF&uk0zltsyWe?L^7-(i=RQNWqu(U0<0a0 
zs=p!x&2Od*cQz2OF*-waeuhAO^aeV2bNhG~;}Ze>$b_WugFo!diAje}M@P+&;|vm# zh3_|wwQIx<4Q+ay*Z3FVQOiCDr$!7~PMs>at_~p+%+n$l&~8+FYsO52A-mR> zI@`0joPsIkX{SfI4y?l*;k-&xgoHd)GmKC=Bvb-7g+)!c(1FM<E0s88$q;#2WF{DvdSh=!M3fzhKBZoOyzToCMA9s%zRWcW+0w;I32CQfu2|-bWPa^ z!Nv&7U8~seEl$JiIjpKl%4{yNgM#FvQF>Bh-o^J`s_Bu~%x_ko2EYEWS&+gXE|V7B zax(`ie~Lh#wKjZX;owjpDM?X;!toD>qum;2VL7k)w-25$clqM zv@3P9zlsm8wjR`8(!{4S>N44R@0=eVC!I+0*(D8pnjk5RRX6+Nnu@-0-)iaN>ED}J zFUYTWP-`(qUHcImo5Nwdh48eu4<8E*qomIcd*i@Bk`P4_21fVy4&_83arWn5)X{aprp$)Ob5&Uq1Pz9ZcC1 z$Npqm__5JkX}5d<<#Sp3^$Fk~JCt?2O6bY(unhd(rZ*!vf^FE(-8WrUFBER<|XPglEu+&}MFjM=!)s12>D&+Fh;>C=?x=860&B$I7VxaMjpS zDBG(%AxmuW>=Mar;Vye75Akc(-n{h{Jb#p*8n2~*hz`Kp#^$zAw)oa zQIO3`aXb&pAQgOD@1F|6dvT-|rH`0}&=B&Mi9jC18-b^b2)QSq**wB}?Y)jQUMA(> z=cm*|p%EiPqf#sQl~7Qp3BHb`YW%r67<(C+xo-m0y_j_o#_*87IEBr;r9n@-+)nim z{rPukaX}(=swRqd2=IW*1Cej(0>4WhZ1aOsaw*e^Z|qgJPoAOK2Gb8Wq6l3#e9N!i zezk(#VD9khxg~jCkj}Q!>UC^_efLF!%;sOL!^PbRUGn!L8wd~m;ns~iPY}7O=R2pn zjV0dTEGaZC5AUkZu2_qIy(8`wO70E5RF}Zff>b+cE${u+IBCq~*_fdf4)` znr%`x?b~Bvs!L7K;et@CO8o`*sqefywuh%;3p(=ir%}Fy9_1~um5ZtX6DTr63F{v$ z?FhdODC|Q6B`9Och}-)_DNU6!GGJ>Lfo`@4+UfWA6Vv>}E~njhHGAt0>x0W{`UZ^c zwFcsU#6EQOsU!+g0E0~8e|$j@9JWwv?uW(g&w|D0XV<2HG_d&~+x=AnV@w=(n^HWq z%{Kh}d0VIE!$M!0125VH zx1&u%#&4wMy-lg87V|M9!!9xME6iAFMm*aUyR&sL>-sT1C1PJ#D${UtmtTOTygPyM z^hnL=@`G(AjlIiDm%eP-Mvu>nGJfy$-Q5Cu=k!zeGoG-d$=h}3fN?U(R{t{lCP@p+ z@qFOn49bdG3`DVjikw~gp&q@Mh+*yoQ6xWhB-N#Tpz(f72Dbhqdc~-=Jj3_yi*bq6|M6=vbH-Sst zUfRHZEJues+OFxOG9&+)6hGE3Ok(=!9E}2I6tyhE^`8L7ogtG*n~&Uj^%N3|3*yYY zX-?I>dPkjeCp9un3NwP{Wqca*8#Q8LGuHP%{)|j71=Az}bQPC>lat5jami~X)O|o# zH)D~;fW@0v+s^(aK<#%|h{2cF#9(WiLiI&&w(6Fv$-Sq|h{KeGq>yNnukkT1lakh_ zw2eg3WeVH4=?R-h)d&FdY4y}rb^950)^9w8YNy2d8JYf9;^A0_=_WfPiFeoBIcTA9 zeG}fLz~?jZLBx2%KjLC%#5)D#6o2^XZd8zz$+gFVBuJ>-^*wUXEr)_Luck|i1}#@s zw{@GFuQffB4o2ampak<$TxxqJ;J^8QEWPa&JiEn`-6HjbrU3@tPawJ#r@*c-is%KY zAWEd>?55-B548rC7tdR^-KVaEeP5O(_8)xO?m(Fv!FBykjLTBAN5d-J`fBy(B`|%( zo~^U>#Q$LbO*e8(7~6`O)r~6#%zFSxW`Ld`1&gLZSxxo}V2el1km%`WwqP=ZKo=1Z z?i7ypdqbG#NVIqM%26}$GmJ={wa*>(S-iZB&g0xsb8!Rq(p$aaH-5l9@?m6I9 z>PKM~olNd^=PeDNAQX5(-KQcAPF3kpuZmEI5!G58pa@sAEpEASjBSjAJuaRfyrdfB z8|8d18Q=twRMUT_PFmsE5(Oo9!HgweJY^LdtTzA5(90{lR{mS_q9&ubLgGsVa+jR- zRvUe0h0nfrW)Y<@^rm}j1k}V&C!`2iKJ-5cuzrb01mtW9W3c0{2{bN!Lz*=@J|*9}99To{*U&DnM&7vm(+1>HrnW@J^S z7`-+GjrBB^)-MmAS@S7#HrHB~?)nTgwiZ{y;k6&uon>MbPmiC!r#4@3(aOhfB}yLS zsD}1*)Ku|@WStPVqi@sAR@L~i$?sL&OzNsrJS(<{i~iZa(C(VmaIG{VqI;hD zkKJx&pi5hElcvIURHVVDa)<%s?=2ty zZp;V*X@4Qlcl{OHJED7U_XvWbAW1qK$iNNizxb|PHVAfEfqxj;ye8-)M<;@*Eh=LE4%P{&9fP*2DA=8PO5(8~n^;_t+a1GMY!SN$*e z$pI3rb8vhD9r|C;7jz`Z1rau1JO?)wh7t%a8x(@&bP8Dv2f zzk7_*E{itOtxv{`A&ZCZItuA{0h7cp57xnMXt>(aoa2U5BOOeY0KQz1lAZwGCP#b8XKNE7@5N*_&%=9uE1gQ@#I`b zQeA2ksXszQUyAOvx8vVN3)sNWR?#Iae<@#D>}nPeL7zx#2Ht&_mo>aq@|A}UlANN6 z#ifiZ!?M!#6V@p6S69k=uEe!@xb&A*KU8>b;;dl5Sg`(XSSi#6NU;8)QX5OP$WqjyWooeVAc+p||eBZ;~u7WAmBs2b_nQl(37+L5be<_jyJ|{&m$; z4x$TM@O%uVX_lNTz}A$&E{nN?+-%+=J z|0VrjL$|*g#{MgG`|ASuXZsYWN9jMVh<`vg5P|-Gfo`8TSpF5d{gZu4;W@!x`&t*& zvx6jwTw75PeWk%{w1r_@(Imv)Y}BJJL>jRkLyHs&f`ReeqpVaG$0JRtV1p59qG2XH zO}j%bqYTpu2A5Rd1Eh~C`2Y*P{`0N((BQ=b^tZ6_9=ypu5_i4vu7CNbHs7?^24D-_ zK>jJH7o?yh{Br_WXAN8dV#L;Fgv%OLQc6z>%2U`D&qXYLs|<*P->^F|EgsP|et!;s z&AhT-p)`5k^Tr*JFa^CP&)kiEgr>$S4busgA^{o_%Wf(m;Cx=ID0pjyizW?b>xt+X12(JV90I+z_P`@mJoUt=v)`SF6~{gMHsM4pMb=d zZnBaxb8HtR?*n~2z03Vsnp#1f;#nVLpV}w=&2)=wtn)RNa6SWy^Mx?4-3TutDUtX= z8I_|@15<*i0ztk=-|B$j_vc_968Jtn1RGK13<5E}UDtJ#475`*QwP(7-xHe__>09N z6$rusthX{7!M*TCYB1;_Rs>lhT*WU(WI!5k{M-?1ZlWqDG7nRfKRcus{oQRO@w@i- zI=(19ID&yF+vQiX_vRlXSdf}Z8VuHx+~xR$@mtuP?Qwy}1;`M_KjA`!?>H;U16h`x 
z*ngsc8`teCSL8zv9DPEC&NjO1W5W6whFcQkPF&%)4C9U(BDWsCR&dcTpmeuuQ1Rp} z__Npx^TZz44Lsboum@%f@^|;WK?O-4ijW@|N#G;xGv-fBf|1&trGn3KTgI@cXVY7S zqJUc%9o`u$JBv|nd?J#ZRpid)kdhuoB%7x8&#Q$J*;#(NKgZxk4P(Fa75$jJtBd%_ zH5Zm0b?!c12L@?O0+#*O!l6=Tyce#oorz)V+(s>E<;OWLD)9GUhKzhF}sLc zR;X#`PvIs`QQ@KLyG9!c!q0V$`Q668?R9h@6l*wsvz8;vE)~hYU=gm6ipe*uc#P! zR+;3_Rac*V{5vMsN2Bvpc0PFRL5OGQJ0I++FXf_b|Dg9J<)`~&D4S8tPBQq2xyVOH zSZFHb$UuJ#IpWv5%-3#aktWS8j)qVR*EpkS>b-^bMWJq!mNfiB+XuRk8oqY5r7-6o zw>i^LI(ay8u+*Wgpvd-%^F@A9KQx4oOLy6Y2oMvA+oUI zRC_@SGbRU42cAA56nclYihVl_n>RYPt~)`Qc>=)_U-GUgJDQ9`Z8(9A;%M5*{PM$M zsOkqO0v>uogS02RV5Z}u%)7MagHKiBTS4)bd#-1O?eXj5c0r0u21`g zF>drD`%0roJB+YeMpT-imGti)qf!D|t#Dzj@Wdgu7|JwbCx$^OQ#=Xu36>O2cI|;#oiq z%HbX$hGzm+Fovs5GT8AB%))tMjSVJPToD^XjSWUw%2_7a!g(SJf8lac4W_vebHWXN zG+tK0YvDlD#TKQqtk}QLg;G21|K{~gSc!+UK>K@kfh@cS=}fp4C-@_&Z7DxDjW1wL zi_jwSay7af_i>8+@ffEp{c2TR>UXAeFCvsiNp=sWH(-`NIjvAxibGS!CV99o)?9{==jz;hyihRYM)u zdG=Kbo3Op2l+E%bOGAUT|YG;nh)6lme)b!-ES(*iVs?1y3&DX7?KvJ-2XS z*Oy;D@hrP;9Ub`zY`Axd*e@-R%~Een9@?VXEYWae>OlRR-erC592dP8c;)q##|cOw zJS$trT-!s;z7RRmgYSe8#BKK*J16cDSoJS&Zn0~t^e$O3v)R-+C#Jnqd-Z$kgODh1 zZ22j~>TMz~QS-I5%Y;7ds{Dux+lcIuqY>3<70XoAO?7Xt1Z|}B+Bw$64)!vr-KTR{ zT@TY@ewiUQL5^`WFRGsAPnxMECW;YOz}#g1Sde--07683RQEdC)p01%3^>hdIZT2kD(-{x}hM^5=NMX9y zbag&fGCZ$WGp?=^HV3s9DB<&79=9;c;7Qj>I^I2&||H7!j}Uq7FPdwk4! zg@RAqhIvy{x;#Sqc6R2qomm2bbw)Y+5OK$2%oy)rA!c0v8Ex1dC-k35-?_kl((6pkTOFg7>m4#%ItfhY? zqK9kH1*@|r!Y|dho_#8L^Y}R4737b*wQ=r8MG@v`&0dG@ z|IpJ47cc;!98@dUcg~{Ji#f3m9WMSg0sHhfVun84;P>jY2lg&Qd_Di6_Fk45Veu^ww5-eM+kt7#BYIJKgYu&dgqc8j(o z@8AQ-xe`rLt5$$Mzrw%h@A@B;cKo6%wS6^WFSoGVqkS8a*b>vX;zi9k=;caactY_3 z<$}xhO=$~qjw@{X=lPLh9#7``xB+i@0-h0*xekh*E9)1|o>|9CX~Cc-B>Htyr|mTp z27TH%?e+E7BT{=D;_~f3%u|)CICQiZbJo<{Vj1c>YwSHkM(tUTgc`~RtJyR3)dQnt z98KGz(gw}7Af$dd;T@Ik*yX}~u+TXj^&=Ry#l_7Quxk#FpGbl+d)-JJi|Ne@t~!xp^ zn)3cQozy(N?8LaDbIQF)dU+{Vjck2J$geIU!L`NkjIH$>KX(VPI>w>)`}AvCA@+ zHrpVSUgT-^1r7V@NY%F^>9jwazt0yljhxCB1howXkDbLB(<%mM9qK{X?lD)DyMXS; z(H6gG&rSD!;AE4P%0vp-7Ocr_FKRo!50@ST~l3GtQcR#>UdVKWV& zI*{#CU)DRJ#;r{geB7}~B5Jy|MbtNfYDXkIQG-`Zu^qu<`fewt>>UL`xv4Q=8=8)b zbd6ANv=Q&jwA9G6{uZAQ+rcKJ|6})!>;Cx{{|o)^%&-Ig{jmlnE=(A|5S4lCp79cr zqm;OAiQV&py%kEoDq~~m{4NW+5IsK5c4e?o`Ftl+lAkLo)P{vBv!kC8Ji~<3GVzPh zuhog^8MdFGOhK)OH;{;VaknJHd`MR7# zbl-}YElxVG1Qag)V|**1HUSc3Kd)@Da{)7>WmBPf=Rlpm_gBMd9`3zUs?^PcG+@>b zE8|f$`m+p|@wnfmjE$yvo`d1DM`f0D5DiD#J=@YF%{!+A1hpuw8>onzGZyg~KhXI2 z1{(VEP;-jHGo98hghqGQ0GlDrPAHWc9WbRvje4bH{FlVl*;n>CSJ8Jy`3w(zGI!g; zH+*eW_mJFvi8cqc{69X;=bAnh{KT10t~xr`O%_-s*QK}L5Mk@l-n}u1d+K;l{2H({ z$hXI#rOruW&M-OU12xqPuDEng$BB)izNne12142C+@`PJtD@wo$OcDrh+@=^*LBUQ z+8ODZd$_3u-D`@*%)?)%m7A-lS-#%F&n?lx=VC1e25oQ_JZZQPhD*vnyFt(=bea+lk+XmWJH75tt_8-zs8t0fxjrhH#aksw@nsJ|4g&KcXSwdYWnt_Uv{S8 zcV_IXd7==g-zV?(JP6Pt?Mm*$WO*QjXMZP9PWDECTw zmr2(A!c#fZxwZ4Z4o_vq+@u{0B00GfBc#~blDJmhq@IW2wKygGqpRtExElP?958H7 z5U`3#uD08=f#U1+eUb(?j_><>Tz9mOOh?>S(d_?n{gbSzIV$pTx%LpJwwUKN`_ZsC zLDX0NpB9(8lGfg*5Ff(OdHL6soyilO&n4p3GL3dTWUGRSoF?m+_8jp*`l zyt|R!Ux545`(9AB8pxdVcMi_=?lAh>pafD({f9w```xk9Of z$D{UGP{S|%;fXp?V$}`yKLq#7BJxbiu^;<`jeavj#8{!3B6AdUdAax$(-$7E16(Sq zcMl#173v0*LU5bhxqIZq6c5; zz}BXB_J(LT=8MjTpsfzQMkRZJ(Ofm(`#{;!%^sG%Z2U+f*eg`i55v|Dv_T~ z0vEL~l@PU|^O~i~c!N8bq1s$mD$H;5Pggf`Z?oy9MK#>Cr}}Shq2%|P#2B{(!Hzco z|NTw}x(xsEN&oY$;U8b`_h0^tS?PbiCH!wwchJ=gy3PC_sr&ml{^g$aPeqjfnYx3% z2cJM=`M+TqkOS`1zi7SAyWxCOhF=6SXXE{5j67hz2 z_Zv$Pus^(dZSU_RMG`70f!~G27sm+{!2itiQv2Yw|m;a3t_M?1EtXxQvufM8@XUptX{oF2R}=r`yjm; zdLeGB*bi$ufLz_R{SD`3N>a?I=XW7HKw-+@ z?H%7(J|sGedBxRiw%VC9ABSkeI{w1oH=9%@p134=0X4P`WAuzP0#^VD&q+E?Ikz0U 
zk9`k8w{2Nq)CDij0w?=Kb&z^x0Cth|rFxQ`+t&c^Co6n-rIYv5@q{|@JmVlE{|H$3 zRZrjHcQe!X`)kjlzk-Y z8sjJ=%yAj07RV9xAm6qlth)=(?PPLUV+R0XV2c(U@jH z0e#$@yurwBrEwQh>R>@zxBT3MdBHKLU3yPgldlV!Ca zWBGa!owNnxZTcuh+=!aiPp1{y9z53nfGdb`%Or#-q6pkd#TL8ZFn)F^DL1jZ>%6=X z2<3^sMv}V;)(X+?I=k$(r`co_&e~CnS+ZG9iXbUbYQSA}>yKvNiRN(#XU z&01mcqodSjN*9Sga%b2U)mlFw%YoN`nOZRUHN}^pSyf;i!m|&}CHa;0XBN7IG zai=Vb1!~EUhR`@Yrc`O(Fim|-U8~zmS07oWsgv%g6$5cBn{Vp{YOZ`56pyS7mz2lt zpw3E(mEvd-cDSwx2`R~LbUT#Tq=!}XI{3f9TBd*Iw$%I3+eiH-*HZ#gk!g5o5T+3Z zKl%tfXZ+^mhrC25p(QUKr(6wTD)B^+b;73G*KvnF8J2hAOY_ed8e_64f(d;Zk{oi} zuZ}C0MsT5o7ZfKrX&s7TL`o{%m)v*1`Aj5JK9fIp7n(gBF{GLDb@@bAY0nTPzAt@9 z;W4>>--Io?`Xj+u{5a9Rxl4kt7|cAJ8`Fzk1{DLgKO=I#ZC9IGmrRF5hsJ9~af@ld zC<$+m95!{W@OeA_qVc(7P-ne)=MGp`&=Q(gyws2sgBO%Ea;DU`zEc?&f%bHMDPr=P z`?QdKfg9F`;OAUlQRlg0wjU)tpKxkX^+lMAx#l~Mz`Ky=dS_~(S`+ApLVHBQl&Beo zbnH=Ewtr__eLKQH0Z5-6njC9bR{;xv0APxn2q&%JT+l_SC$d9j&q?MUySBs%71HDzE|%%8lXFy?UYG7O$i7sWZjkah0qMr~0l!`T zwQ5cguOvo!gbwL@?02SL3E$94Z>_x$i7zeiiLe&o76we6AQ{oRfp@?=nrT&|aF(Q^ zk~x@hiH`85xw_XA(*L$Sqj@$F1SeLK> zuh2a)*!?V84>bwLD_!|mw&r2Abl z@3C{V8RBhJ$AWIkPs4*#q$eka$IOKR<~2fPm^j3-42VxyCKU8Qv(onAMkpNL+emv zcqL|u#*GKa?Jm)9v!BJ;gb-Qic~CUL6$hrV;L7M&_nm{gg3r4lvX0vJ*$Tg!a{d{N zT}E&|tNB$Cz&S2xm6N|wSk3>;EcmR*YmL6XCM^hax5szq&phqQ9q-=T3zJ`UiC?Z4 z@4cJvw5xJvjSUea?2Y@FrS^;pBp7q`{V_DhMee6gXAk*U>4pb1)`~w))mma1F4dIBq)%!CdYV&weLfn0+2#JpJZ)z5~X$2Tp*E5uVhwZ$0t$@>Rc z7ja8B)ydF*@^*s}+kr%uI=VuP)O@$T ze%S3ymh9IgTIVn1Oi8YoL|l-sxw&-eJ8-V@@ZJ-4yi3Uo<5SCCtmk7`#588}q{`x* z=N@ikue8v!oJQ9uo_U}_job#3E|zeH+Mi4pd=<3I+azf$tfIJAGl6z7^AHhia&F`e z9Bw?sP!U6d;(8-ef)E*fn-j3Gp_7QK2!N4b<2lCUkW-wG)M8Lp>Jt;gi{c;VMc%#h z^TGaRRa`hj&H(`TAc^Vk$<^U1fbA5dj$plCZQ+(1%oe*{);a$SJI|&|YJgcKJ0p?S zz?{3AGp3R?Pn#Rh6|R(|+b(`2cT|-7R$WP7?7bNH!TAMcTwcff$Gy^z&0ljA(> zupA=AD?=$8gD@*FlADd+zX3HzL-? z!H+?ae&cKcE=7A;PDKQ$g_(m5AnoJN1M!7e>wat5i*`X>0=GS1axb=DC`hc93!(WfNqnwMdaSoj)S&7))MC>(>cSIY$QTI7yknUf` zctgPCV|w=~1J#aZ(I?=~ux~=E(}IG$vQby9J+XCR?l6Pzr^w@khr*6pi?`_OZZl{h z_|TNlmWmuDdWl{PfH1QYU;qXVfIrz|%~5;j%h_ZT|k~Y3~*5xb5)iS?XB? 
zR3QctLHrj?AXm{=l)=G+)}#o~cJ@*{Ah>)8%5y@VO3Tx8qM3i0$I^hcH@9+Qk?U$L z7iYnGXRKaF%e^PfJ}|9D@hkR*Edcuf%cf93Wqnd4q}^-FD1CcCqN8p%8WnB>MMK2N zx=Y?k1VR97UrkQ0?h;Wly7dA}j1&S3b}~@NC0|HqvSqg)I;$i?X|D6|!FesouabL8XIExQ%g45T@A9g+d+zK;>?NH3UuG+a9GH4k zUa%GA&-^$&OAjOxOYQjq#E_KGEGh@ugPw%_@J5)G*%CXEfBdL%zL3^%v%8T4z$yg@ zQHr;*tA=iNDD;N@xKzmX$ofOL=fk_IncVqse4^|KBNA)-hnr$yyx7^*_dKw-$ANI|Mt^&x(6$p~s zEATc=m}{OOFE<)^qXc2MzAsW_c1RAfb6&tjzN<80V0d+g;Kg`N>c^Ybo&ne$E%pIxf*d(0c0Q59hh(56AKP zvvRvN8x9kD#BUSFrFi%G{5G|ns@PsL#g~y=gLChhMUI)+Z>;I zU=)8)FnL&@upI3L=9jcJ%&N+wIdS&G!uVn;SmwNEDR4I|L~^G5y1zUTNpTPjNX~X- zjagh?pHN;8MsGs(NPBcw_(tf5-W}8KTEENX(nOA3SSuz7^`ZmJFHZmB{#jDd*tjxk z?aOeKN-FV>i=%n3dif2)ER1QD7aKVuvh^eCMLRZLzD)UvzF#7@eGc<0tAH;{=?Aqv zpF;fW@-~GWLmAA5($u_6CziB>I9+^XfsuVi)a^(O;wii;$ja#bb%G4P?)>^|O^s>q zk124mTkolNOc&FvjEzw0>&iW#dj60g_?|fyOZo2ykY9}&WlQNm)EZAxbV5WtXG2Wm zG#|4Zj9U@(8!0AOZk%6|cmbjE+=)J}ub{UjL3KjdJ`X~=zVH`H%eI-(ZVuN3InRy< zp=lYU2I!FXU{0)KRR9gqD`s4Nn@^v?14rnFJ~e>RuMJKc;(tGYQed#9Xxt6^UL#Y( zrgL15NgpN?hpyC+A~#&vc&e5wZaIhN#+zG>;jRY5E2(>9#XmAtPsH|yD}wz{$XTuq z|E!fm;;vd$>;z897K*RK?hpG_`YaonV?H=ibx+iOC~z?f6JG@V4}L`^3+1e!#(Wv& zDAjip4mk74;buPA_CjP+3^$6YISiXMsU-25X|vPd4IzO2%kNBvNy7j?eIhSCdOCsB zKf*0H#_E=VBbXySu6Yh_8oP^G*Uga;jVI{CF;W%_3zYr?*vOsZIQwGm3l2A$*gqF8 zu%Q!~D@W_SjFr^&_2mgNhi-)vp0REVhn`ap=XZuCZ-kZ7YkqaAPT=$(vKnSUWhmB0 zz)s`Lo__h9Sm(=^7G7255jw~p&j%~}C9m|tXo;U$j75=h;I=Jm2P*FnN|Sve8F#4h z{w0HIZMqSa=PefKL&xy0c(G;lE@#jk__|L2P|?RMGlWm z1-Z|kt`t?Vs%?b!usWYdY3GUEHW@jM0y}=%FsyRhKQdq|*vtG3%c?}57gg^kf;^p0 z(3?(z39s}J^7{?yp!oZ`tTHX=I%uKrtGYX1L_96y>7Q@%xtDZAuHgeCbptXho#E-Z zWbH9jD3+IWAg`wBB|b69)E1Q9$mq|Dnf?Ucrt2?l$@oY#wXvBz?xK1pMZ$VsFb7`W zCf7q2zlRtNQN_chPg?*Pz8-Gpg=nM4(WaRh)1Rs(9exku$@Ccu`AUgZY+p!*s@vjw z>~f_x{cY}*Q9MFrh3vfk+_9GBFt3jt z9`;xc7=*C``UjEONuX2!>qSVdo3s#C#u=EvQS!MF7^I*mCwW$1s1-DOQCKk+1AtBo z%^t8-gl8i<3C33h$%dK>*a}1-1UNX6d!s6b;H*nM^+mgVyhnW@Y$U~CC3qgModufX zhB6EhezNNg*MlVwN9_$}*(a4C#bJ%Qr`i{WWhEH^2#`i;^|3;K8CO+;eE+#7?Bk$DfCEB|Nq12zgb1VVH7kN|QLBtmcUdar@&b2IB?-Yp(555E060$7 zL4^Pu5v*M)+_Yyx31>p2LA236*r9qS{~LDPQD=U=KyGlGq)}t?7_pq8YjWH$ErjCa zfFNGn5N^C{ZaqZP#BUMT03;UhYxFo(Ba<6dt59ix>JIskH6XvwtwJ;hJQ*!}+hj<# z@bd%fLFS=z6dhraz*fNEh5Uk!Lu)Tz&{H*Dk0E%&jjK^Vm;{oV^g2TP;MPkJ=TSM4|sgP z-?(5uQ(XMu3&8%$b!g)XY?nAru`dLmI3PMMI8eJ|JhbJsju20J5n@RGicKu;3-2Fx z|H)3V2lZmAEfBv8XSa2U*XDc#0lGdq8i*1^|Ye zI8*=-5J-(?7-UFo7^Vq&6ulxCS~6nZJ*{JYfVrm_73^09IQGp090&Y|TDg0Wrb&As zr^$)K50hYaU`LCud~d+I5SU4OV3^5!rmu0kLhr%5V(u;P1$HBxSRQ=t0ehoYFhh#l zsrwUGv|Um6IB}yps31Fz_YKuO;VZx^c+YT05n$Ly00;=-LQ`~k=IhG!MN{bK0~ih% z57nX?2G;Jt?pkw4-6M5H-!qPScnLo6yY;aSQThgYDU`oGxj!1d4Z1&;zcnem-81mM z>|GuM-=X-1Hu^nKK+@Pz<6|D+;dYdav)y7+Mu7*?AT<1$;AXe*tpFPev^c9Au%pH^X$-(MGYG=Ip`!U}SPKF`kjUQA$`<8C*$cq|DL> zH(F^*@!j{JXsVsW$paP|w4!;6SDk`yoj>s_Gnc7C9s3XH${*02d~D+2e003Um6jCW zy+@KjN4tPlPd%XoX!vUMY`5K9iH%615SX{;TE|dx)_LwkpTD2Z97uBIfIFJOOjn4f z#w)q0bh!gOu44V~<4(LYEvlrV{b1C-ok6iQYnjo!hyH!-1qGzbpuv8`ykX9!Zm_1x zc5WVC-o_laNzo?x92yDxs_1xpxw81Nq@g}JX3m+ze$KR*3prNR8G*HA-AFxhm%-72DWUj0voEl$I?t2(0ONAK80nfD-9)pLTJ>V2=!Grt+Y)~6=^?kt6 zhYd%jy&z6?KF9$R0QbHf+ML`YLOhm>7IL73NCf;p#JzKrB;A6qU0p_3RhNw}+qUg4 z+qP}nR+sHA+qP}n?wkFdnKSR1Ip6)}zq>MOtzgEEojYP>N+ zb@~)efoEp^_*3XWfzO9Mz-RqnuwHQ@Md@qG|7F#gC3KapJB;|$u#e*Qs^s3s?+j{h z2p&)+%1C8jbNxx{wj!#!InlS$7Ga>>9|YDW;pjQRO(Bt+(|#pC{dE9k@M}bQo9N4A z@)MV$Cg3x#{_YBa=2DczR~?T?Ms3Fz2NLVgM~g===XJE+wf))`dBVJr{|;z-bjZ4( zve-Rbn?3h52gpxX@J1h6a_7JW_{Gap_XatCHET_kVM2c0r4KVUkA$skmn&y zU9iw^ol%n4u%H88=d!7Imuym1{wX~``M8X_kD~i53?#V30$_#xd=%Y$VC`R8x)^ks zr2K+vP#g7T)*_$QFT+Ynv1D+6#Hf^@$c@NxS2g01vUs@VY!Z7(X2PkBoGn#bE{z$` 
zP|#eFw!h!*TI3UU{Gc6t+JwQc)0tuxuvjOTT;a&d^Tjv*AprYoNqC&txZmj-XHKg` z0*M9+%g@OCJf4@GE+t5EN-aW|*JCL174dV7piF{PK;u@id?B4++*l^lnmJ7&PGMIjt2>blT?G{{K#9K7(!gAzFgmgdiXTmD<=d~U-xTzZ%?niyCNC9kb2fx{F^`9 zuyy~>SipbyyL8~u-B7XT{E_Rxz*CcHVA+Z;zlk?Cathnt6f+qS{oC8FP;`bqrJjuY zre9Z+jqwY9THnNnKL$JE8Mr9dS)V&r@ILVnU$Aq3^|+)-Yyo=A%;sv5ye=p>%^yP7 zV=V&XJECtn7W#s;s}?ycZyA;6Ko%ZN;h#Gfk$8%dMRrYNn3a<(g&Gh)lILGXRk%jv zBvE8&0^9pWyC=U$R{GtPh<^{=Am1*oc7+VXDNAsvKZGVlf>HStV$fECZr}>W*c=oK z-1($kRWLG)Kw@_{;Wt^>Bp%~FlMR=X-;#8{H14ZD>j?&g7TohqT4WoUw(L5XH9+q6l|TS6N{ z?D*_36;W2?yLm`Gm(m6@=Sz;q(w8H0J;A``OLv)EkLGSGQJzSUv|yy^bAt4}qW}3*fP7fi0Ym z-b8##DQ|(h|8k$DuM(2pynLWP*D!WrZxR@&%|wd{y+YTj)74oxBR9$Ic=wq?mT8MINvULspfN5vUYiPOnrgz(Wd0mk&OZsqrO?2df)Qe`YBTA)gq_R zX#QMbk?JL%0|?p_-BH}O^Zz<0^z9Z#ZkrVW~k4=!pjG}KJ9PgPLzZ<;M6+#)L zUl)f-74=W?o2d>*+}9@umk}%oCj=x2 zAQ9e0I*dC|r*Ncml}Dyz&^A@cr-Y=4R3Wfnv8J(VRI4n7REMt{b}qP=GiuDq+++j# zOc>7>$2{W8`{MGnsjibkOP4^5bE_Cxj~1jMq_mhgYX=Gup;2rl*dZYaleZP~aIO*O znkl<+1&gSGyedxniln!-A2&N24SI<5qSnig-k2DfoFc0smq-!pXhsUIQPmgIP6~a9 z;N9T3@XPiN5-r_RK*iG+#8DAwgi5W75m=v$Xpd+|F?aG+o1rN_oY0raM!^^4O$XeY zN_x+l*G`9VtNsG17{~40#YXZV35glR#u%STkug z9tJ%8gMqy@jR-A~Gf*)_7;g9NHZdqBi-*uN4Lf}I#s`hTh~%#X`*yE_F;3k3Ahbl( zSg2=2M75bR(V`xyFP5r7MdLKWLc00UM*62y1u8r`r+IS zPG-9ups{W}JuuJWp5?<6Uy@(RZkAYezNg!GKWYwbUE=#FqG|ff#1<+oFbZCl*4DkH zqkcLMo~JCTRdwiX4CG5n93H}ZS`OTTNTzs=izK7*sr4KJwYsyaCg4cn>_BdZkLO{u z2$JL}TgwtdWWeY zypRzXtT_maicO z&hFSU#Y5J!XZ2amL>DXx3z&*la7w>c9!Ro=$knXMpvaWP5xL41%9hR?n-%kL+cu0r zvZgK`O9LJZsX%)IkA?6c!Xh_Nz9I^=Z?f25z*(uAUb~4lzHwb#@YuAOC_4RE$l*K! zW_GGI16nhY^k~o!v2QaHq@8*KL7sv|b^7fc82({aMj4wP=;SHg%i?Tr6|u41BR}8e zgCinBR|iDs0#dWp7?>W2U_galKZl&Mn+gR1t~LvlRW#~C6Ow4yy#6stS7Q(>FCaja zPD&ZLzF{}uAllpc3BLQepEUE`XNs41qjozi8to3bDSr=O9BE2w+PF1SWm~^Idmh`W zvO@6=I+X5n>M7dUS|9p6s(^{d3-h-7wl9d`MWFa_8cfGrV=h9s4`{F3VFZ zDZ;S+yRJd{to6}ryv0Q4xET6Gi7)4N7z%1)a>PNB*^n%Bk^?aws?EQ;gOdmP$01>` zge?X1_9(LR%NR)LGI|I!DnsR2da2gEn^fQZ+oE`|Sv{az?Cc}@u2gdCPvx>q$R1$^ zaepk7u5*Y}hjs1-AcfiNp(fVz zv~R7bu;{owA@+D z3d?ZDwO&k9{lLhxGua`@9!(kadi80N7--S0WkKuLZ{`cF4Gi8hBaEWO)~|)<(M1}l zRXwDSy4mckq}*G-y4UFSXDDi0U2f)xf%IzsZQ(sm zm=Kc|*K5(}=|*!g2z)cnJz6=MLKJHVC(+_qVN9K}786Qi zp9c7gP}^3C>w8|DYG!zJt@;j^oKz%4_^4~{HJr!Gbp5JlTi4Ij7oHBry}kz$#B2Bo z^i$j%`pu2hnd49Lk5b#>7w-q}9C4FJ%){xGUTm`#v$&R(ci$8G4;qjv9jzh@sjDA7 z@tgy#)2)jXs!&)+Gn<|V8=(uU{*pKO&A>|*KF;%MqP)u7+)8RI91D{sT{Xf(LmJj3 zTNvzVxYRiVOE+JTSq6dGNOK3HHS|s82h*10*v5ok?XVap10(y>AQKRzhC^eL#3r9O zTd@CQ>VrT=l%qf?Q(ZO)_v0?It?YZXhP>kJ=SeGk<4mbdPjz0=R*Qcn-`wC!O&^>m zCN=o#Mw%bHIvOz_Z15bDL2ueVUpUWf$Jhd@QJS?vkyQkZ#QRfUbn&Rmy?$?u7|+K2 z{-WFYYIOt)uk-gQJ8SI20`v)B%QuIOg!8bs{p^)I+nm^M;Yb)kXy@zuJfq}=y8V1^b)(b#Of zTIqVH)fEFhD^=C@Vf89*Iosf3+7KmWYE#nPhngv05?F-8D??Barewlbjj$-&THW$4 zO||2#2A>Oyu_Nr(vYW-ctcxBU8l9Uvh-y{0DP|X)rwK2Ouo|~>U2}Hs$Wz29Qqz#u62yk%)N?6jQojt7O~U z9RvZsjwfK;K$+C0p%hM0l1ZnAt)S^<1jLz=!{C|BY50O_T8>tlfVD_48I{>rm21My z*C4{9B^J3=M!8FafVN$4OubZ{K=U54ZTT2^(w?;zezOR@;SMrD4`HwkuT~Z@UxOZ6 zDcYg00#K+|(Nt_D)9kXDuE`^)wBwawC=!taCVuWWqo0Y(B+9Dy5_>@Nu6@`I9rfQl zTJ#Haehg37dA~Yc*v!@zeI~F}dWTBcZ!Jjx3*nQ*Z`n2PSui(yO;Op8F$>?GvB2Gs`Vg1b1W zI|PE<0t5nXX~x$Snk(f_Qs$~fjdtkvYBEo8k?cwCQ!AyW(l+yJYg(=++GFf_V7#(T z9NG*`ur_AWucqXl=X1e-vgV+t>)~O8Hl!Y>H&eWPC<{7OZ5i6KOA-(VGu^TGkP5Nb z-=Xt|iqx@{A-HhbFF^xeF$YXLjhToI5W+BacxgJgQHBrD32M5 zWKQYWI5n|pwAa!*Xze|XLZKPcJ0)N_<*Zf9REI0qVg^^i(9D#ny~{L^3XHCewOVJT z8YCaM5J3uFqjDo-Q=G%R>?CICT#Yhzl*dW2W<`b9YBbIt5>O`E1gfykWGqM#$p!g58Zt(K!cr;p$oJ$X!Sbz(`F_$~%o;Vo{54qJ4O ztt(NVap64(^L)yj7n5>NDSbM^!)flsvLXU+ z(y-ZsKHgK|frRS3<=R+*Dc5-0N| zI<%3rO21OZ^miZqn;uLYo<`v#f$Y#mg*tu_yhZDIWKMM3@|fT4z?TE8xctn%A=g$+ 
z#0Z0vg!F;hgbgtKsVO1i#^rYXAN^n@}e%oYZ z-TDf6BRnbexvYI-5FLo+IC^2}D4BsImkyO$Fuw7E=^XfOo5)UqeY5k}U{;Kvkp69D zQVex#_ZtQhXf%Up5aA5C<>`d6!|6oiwheB#OYLjzC0^xL-d2(9@zY7nvFtI*G{k+x z{lsf!X-WQT!REK=eD2av#p$v0Z{If)x*jBe>xd%2)xyN^tx=l7pzLy76%(JrNQI#F z<+ws!@`eplYjw^IyM|Y2F_+(jAq=`os43ZJdX!+Ri~Yj<*(9D~1ZoQMva(13$ULHs zzHac>_$m+W4K0x^#A4s~&E?fq3UBxAZdMuk*NF_vL;}{J>SZ%hFXt3p@r!$>Y!mOe z!zHvEp3GmGyGGFp;fF|>T;kakXcdQD;>CviHu}`bbEL?@Oh09j-oHlV*h(SOFv_)I zj}MbC)Pj8V&@RoEzCm9l#*OPE+*L7%qzaEGOF0RNP!PH_D`D0op<-<(Z>Ob%A0{Fa zO$x2jysWNfieXs_bF!~gDApmYiocxZrR*Z;q`^zzRqjmeT4cWf4w4MEDc8V9hR6zuLL zo^9r|tqBZov!$>vq`qj$;(R{8_#SW>Ie+vGKtiMO)=i@sC!IN|si6$7_ED=Hpktdu-Rg=)JG04$ z6p>?=#J+wvVsEJ?4$0~G)vRjUEKHs9%XcfywWcqXV`X~`_mSd_lVFBj^O+?i-yekY ztNBe$$nQGBA7e6@rwz>SE^8gD7@1Lt-w+9)!#Ula_oCSiuXdt679~77z2b>DlpCgU z{l++#o4%^-0Fmt-EfDSWD&p5U?6Z>#RC2Iq*UJScZ|ci`6?dEcpPA!@!ENFFQ*J z?gSUnXC%t$J4{pGV@aKms4G$;epJ&a{De!#w* zuntUT4_N9H+Wu>ki1GJp1e1M{;J9txJu>*Sa{*&mR~gr1f+-f+-ufv*cTn|R$D5xQ zyM5~e5X}(|CyM10$*;}_ks`i3c#%LFXoP%HH-WKHg6X9yw2sG0l0{7s;a zLoEZ{@H3P#J|-B=h8df=jAY>a%K6PwlSIk^+(zG#D-4VY>oD0*&Qn%__LPCQd}^$k zU0yh&o+9O8E5RH@sXl_xJFGk&^5O<8R#RpzP{;8+=Oiy=>zss8s>tqQ?~JeGJK8dF z7qh=M4vkf6vOqA=kB%Xohxb@u(!@+l$DcVe9;|R^m2!0RbT2IZs9apO&|JuNFV*Ii zv8rKh9&|gDynA4re|dbV!pI&A{Xr{v7!d?D3)(kOJ7T17MFK_*hj6IFzA?l<+Yj zHJiB;CwGTnX&Z2U(MWHMBp~$r7Y#?D6s&Q445p>QxA8%bnZY$H&bE(@Qi!hbwbrlP zX)ZK#3*^ME3`YKAT8FFhk2`NuhW>azuw-c12Pf-o#cJ!j6XU8j8(KfM{>Ulf@|*%qvk(R;nHuTx%E+Er8MC6a;T8-B0B3%-J{C= zX#Hh_$>z>}SUkW=D=jo9L?j(JCqyQ_Ulf+fgkUJGHD-09DS8R?0rpyUU9euXzVM25 zuM&bc>{@V5-c#Z7#&x;L98}#_D8&({@GBLa>!H=%tK;XUs!bW|)Xc=p6#Y2;B>e<^ zlZeJ$#knf;dcKZp-OoujLJmS7HB+@__IIQ{t)h%u8_M>=r;p$esi%XDoG!*!srjU8 z9H~+D2Xq7pq#dR>^Xg1%|4i$|OzR>p`UtOI22(US6=M;pU-M|uox%sT)p#0{V$Z1e zu>v+Fu1v3xubi*GR9ilPx5IRZw(EC$rwBtTiw`h;K}eEo zg!nbLLvi3CUpAmDAy)lb;ikqkcBLqE(I2Go= zdDi)h|2+g_rtTD)X>ms;TT4We!uB5X@CYTD71gGS6lFw4gc<#sGE9JIJ$p+##IMO> zO)=Mq<_1vJ4S!AO06og3aAj>5i`Xmeno`QqGTmR-9WYjSf`^>AEuyo=RZX(yXcc-l z!wFgqvQh)KBPI7gWvCicFM^!ZwsB23573oyb;e^HQx|$VL3I6`OA|`*gbNDbjq)%3riRxM9Y$RqoQpAPC)srv}Oa=eIB-<>@dj{h6y+OU?r?q<)W~ zVMHiezdBpx)e4v>HINZ(5!Z`|61_(Gg_W1BF@N+~D)0A})u^T63792og>SEwq_-4E zSKAThm>$eim1Dq<;&MQ66>LRI{_bHp&|$XlAR3ZUzV5htgw|jm6%!A8^-P(nC9t;o zy^eaSp-(9&jZ?>t4pDmAR-?b#-dkS1`Lb+)QTu-G(O4FcepxtIO*q;feO7Jpdt_Ju zPlGeJiibj^7E$E_9B)liq%8}aQS&^Y70Ir8LmqPsUr#G|Oo%~KB(vLc*+*_yVy(8S zL2382!9WWkCIcgr$9kf*1?Q~sj(T*GQHM3BeLV}cVl8g9Lh3@VMY^JGecYe9o<_2D zgoo8G=9Yf&kbZB$rB@W~rSS!^vq08R)WtF+s(#i`!m83qrobKeHTN_3vqXEom{h^* zNGN?(iSfFF=~0Bo=d|Ua>_|iPResZYEM%T%;b}tAO2O#8tYVtv2T!V3o;kh7#5o9p z8tKWjB8qgH?&1oAA4VA%*jcV$ROh^z818L-f3V=h0Ph38dWq)8D)Cd4a#W?rnt!c< z4f?XX%30`3{u@mp>#wW!&Z+C#J)3p0u`V|x_VhV=<>#;Lr}+-@TW$@oJQqT-cM8d%ueyrP6Aus+zzL4C|;O?6N4W*dKI@D_S8~~f z9i{by8EWh~J<#NQf0b0O$s2TVUzF$}40h|>mI(fef|7MVIZ0!AXl>G7<1}G{gXFZP z+3!NtQHNZRicoefg?3M8x4D7VZdz5yP5TU8Rh7QiDneo~`bD+_X$-c1)8-Cb7+c5wG-3gym*+{jJBK!|Z2*16V zo~5+nzhQ&WG1D*r#}-GekN>VV z2CM}rcj^CcZetb}CIFfF4-%Q0k%8$yY^Dr!bO5<29pKD=IcNbR^8XM)0K@Bz82-Xv z{I?PLf0IxC9To6DC?bDZE1_ux^c;--Y{RGh@7MlMxWNAdhBhnnAKC|ij={hRP_!}v z>J31}_yY_9_=o`@Z2CV$V1UsWfFJ*d>Gtot0p!Dsf7`?iKzRJ?VRZQH0H-V)6Dtb< zLju6A1E3rKsf-4we*tP*0M7b9R^S5~j|RX~{g2ZCpd|nj$v+nXCHSu$|7-3~$^J>c z2B1y;f{FacRs7chf3D|G$pBz%IsikP{yzvG%yfTO&jK34Kb!9Vji3Drd&)yDS*47-=-@-&M^mSfVH?!jV1HVT_%;;855W z7Iftt%PduK(NISR=R)bHwX=AFe<zE`<%UC*Sg zE;qFz+W+|!Vfq{B;_tWBKW~!%LE!lBx6}WcyYUwr|G#lJ{=(k>9sTk*UjBb_Hvl4O z#y@y|GfPJ!d;C8(07oMsBLf>lBWT)x>;uqfGcf(VpLhJ?&Wl9U5Qmd|%xo5R{yRpXbRQ5vU! 
zF4sq%QQ||m0iC_;C}W>R3MlW|YVz^ynyPYi%BtQ}y6PzV+6(y|#E`3Q-{@%HzOBVG zaG1fgatw>tI{tSNr3?OkTUz6RbUN4W?oZY$;m|dZkEJ1L2`M$Uvq#h8GuVd};)MaH z_b-0Y9gq)=@xoEb%_#3Yf!E?CG3hF-UG9rbda?usufVI>IXdmRa4)&|WJ7{o&fZFQ z?K~V^9DxF&-{;?o>sl71M*@Dl67J7I8_*;tp-y#ppI%_db;1u8r|<{pM7~0mu<~=4 zRkYTgynNw65@==pra)U-Moo_Ev@fgj?RE)xQnV~0SsgM@4S1WKmZ!I63_!5sIz~E&reRI8N^C}qlWPhg2h&OyC=8KWfj*2uy ztFu+!A2J~z?D}#7ih!Z?lMJ0V{JxBkjBjDbzyHQ#;q{cA0!m5 zbyQ-r+0Jd)e^HAhuu1c;EC25`VHje@~DtK*ll6EBduDF0M*>ik6x=-8o<{|wOroL&MhwbmK7;Lw zwIj_(h+pmR#Q~P&e~;>mZ6Ob9Dg@mcW~L5~Ic_d4H+G$FaWn4Zlhi@DQz7p+%Mx5`_oXL6XP>OIb^jh$2#GoeZZ7SvO>BCDL8 zB~BL(?A=ylS`(^oGCj;)q2r*TkDSa^%LA*^i}mXkM&=cnkEE?JXs^8na;`wBe_Li= zuSkO;h;CA~AFUVW;^Zf~A-J@W{?_6uSv4$tBtun$qOMLwoqyD*YENx6;2_MC?)5Xj zp^$EdAWt0ZAod7Nb)Z6^zF1XVH8l=rGR?TjQNTENI`mttI(;C1IazDuqEVGpzWszjS`Q(7rA0uTUJ%H1_|oc#0sF=>ji zAY|%agtuJj8A$o>Bhaj_0Y4!b+_+1Q75E3J)u2hjJ?B2WfI-L;Jqc_5RBQNRJN62~ zv2T{I_&OA!b_TCH-2mJMYm@;`fb}Hotmi+MzU_dC*O_5QggpdxjsJLL-!hL06+2fJ z6TYZO<-+SvNh;%lk1w+gD(mqS;vjI~olyJv``IyNq?#9>4vzhIVx^X}j896S^(X9S z^h@OEY%u2jUV7Nim>`ls2r^KUx!UJ%*#YmSj)!hvCu8>Jbo7s#w_kZiZaLR0qn(GI zDja+5MryU`KPi)S^k0r30fL;qy=G5?Dz@*Grz-C02W0n{4xJEe9^dPh&s=+IH|4io z!7&}9S%z5asC@PY6QVfIitt7gtRqo8=!En*LVI8Ra-_holA!IT@IyUrdo)nLRD&u+E>kJ5kjst~qd(9hA9|4d7dhbC zpL<~H;4AzMJGmT8JG=$LrXc2mzq?Vn;I4@6VjJURW=AS_Iu8wvc0*cbF~wA#DFt#V zCy+0ZylqJqxE4?scs*oJ>BAj0z1BlaMs_7EzT?dGo24^}W{s#k zcjTzLUYntEed^7Cu|dTF*|K*%Y8`FxfyVGl-hwQmgQ!T{-BHO~Q+oquo9Y%M$!|;g zF!#4c!wq_ZtNAhNh9(E+1NMMYx5IVKiTjC~xMA>yT9fJc0jYRl9dHX>O<%t4DX>PH ztKJEZDYhC*+XX5^G)n*OL&`+3BnRFu&U&ozw556%bM)71=)+cnt101kF|Iv|<&jbY z9Ke>8plQW^eF!L%*Zx(o2kb#hCt%Tim5j*mRj8^O-=9~GAG=D}&y)*MUdB%hal#qV z-QjyVehL1?L=d8E>?WNUVkX|NyS+Z1ysMD-#g*+VOMDLc?Y;uTawVICd-OAUlL52k z(T59cf#||9ow1ObN9-L#jD$%U{`OUh63&!_fu;L`QU{W>mhABYQjCmt6u>B4AZLX5 z!6kSg!Wq^Xni8R{X5cfkI~QMlPTkZ*Y~+l%?6~AIfW}_$-O26!B9JFy zvg8y8x_9wi8~HSpI^Nbjt4AL)9QT_+gktG~$PaS~ckmp$hufv47Mb@QCz(q!@*I_M z6cy!hBE$6eMo&S?y`Oa(ur>Lk@W)AgrAf z_W)1{;2hJVR=IdcQH``hPe>U zua^arHfD>wFv9ya;VPji*&_tDTW{(u?Z5wx#>c z`%wsKvXZPv&S)j}GaYaP&aUE!VXgbKub<=jp&*bVaLFbxhD4>@9#D7}$IPb23`NNb z3FP#Oboe6v4;vAC8>9q7xmf1q(Jxluxlz^wsjB58`y`r`o2gU8PPozOTg%>dH==7x znOYNjw$e^Lnc>7vclGWGtivs1x8)dh?*qZng-NtD8QQIS3w6vVdNqVcz`}chXt7W$ z(^BL_6zK#=z~Ga=2-6Th{TT{=U}A_zttXPRkPwrvOUm`IBuQ)tgU%Mi(g_*a)@TK2 z354&U-t#z9WWRm}`sc0{GFLsgrq;gqAo+v3z=dNe)T497=&VHB~eJz+|*qyl!>sP9Co zA)rp`BFGTIY!lK1#Z_D0_YntpTP-hpMyj2n)?;J66YvMg!3z_a>MGLq7PW^7(kYA9 z$z5JW3Y#QTaNQ|AJ)LwEPB)7oxTfmu0hr%ZJqf+?HS4(>}FE*h++0E4bsrxv-P6p{e-3t1p==ZJDOn(5A*kWg8{tv zVeOFUNXqau$Usue_ILvE)t+9=Z&y3Fy`vmdFK6%hUyOVk8@h=xb;tQr)j}?lajEMN?bf%d17zE>2XOQrck^tKjw9e?u=0s` zdV71ZvP>mshQ6>wHa=V1%Ih6!dkV zN+-p5eZ0FKLz}*H+QWoPvK>XCSx>Pio@UJid*W!@39c>Lsi*F|6zSKPvg$`3D4s)tgkq^vuWp6F}IplHDM`@Tk^ao-L(Qk z6_WOC=8_4~ecAZ^>=yE+fGgZ5l-JXW$mbcF&kD5a7lQAbeCPSahDR?9DXuZ8$LX>NBuFPKT#j~-00 zh}P-;;OrVLgD!tn0qSICaoF+W%;E60!DXa&IUeVp)A5jrUSpT}y_#Df_;fty;CG1a z28{>Bl+oi-nVhHpra+MAn78<=ZF!NIf;*uN5jshZb_@w<{hQ)BQ|{bsj6xhS(iwta zM=8WZw8AKjsqc4YVj>Wtp}f)Muc*O=^sF>=gq6iqBQK}Sr9u%(<&#@o&z6UyWmz!F zlU~n-W6Q0kpXvN*<)f$x$2oSh@or0#HlA(9i|Dm(W?shJGM!Z`txcd^>bIbB+OSv) zH1AkcPC%F>r65jJy7N5B6~dcV{8lF59jHd zkdZf)hD~;-cV1rlMEB=(G%J0bo=W6c;Sm<=Lv($QeU6~Yl1%Od`4#+ZMamQWmocQj z&h}I(Ec55vr?`eu=eAL!da@m2FBe15Vy;eTuM(UD*9A}8s!fd-s>9!Ihj_a;{8GH^ zR^PfEit!#!e4SU=uTQIvtwq#H;91>U3)0W!@QXhf!tY zLRJOuxOun(wp=uyU<4cKOl`x0e`+Ky)oc0kQSbcGPpk4|7m-ZRwhejbJm5#J^qy6` zL4M{>rxb3Mpk`~DZru7kM0V9pvg&5X`~DdFq34nOAz5CpF_T-1vn{h;@XkEjSwrj; z7AD8un2eat=Cea6LWKnIp|rcL)yA&p8+HRyI1qDB9JCSe?^qb}oq_(+xaR)8zJOo0 zKjm(U&q17eE+!?h-BI^Z8lF9b9p-yZOIj9-!PinzCLonps4_Q`lgoyxakyOPt_zMw 
z=JuTzE;j@k-}^=;9R$Hkcp>{riLQtq3q5H-EHH;n{F$dxJW&?c$3p4GBR4+4E01&Y z@)7wy0iA-3o*v-#OWblvvR6UBalh2wUNgQyHD0c#f62JYNMDP=Bc^pZ1LUeea< zI!ze5PSs1K{thN`UU4qsuZ8cp#f}JFU{}iu&Ndvr;45sY4!OMl)slGKE|Vq(miiK% zh0A+4FjmZsda|2yIAEz4BJ1lbP_ZHEt>DQO!w5~C>@yZL{~U47v^Mva7M)-&$dWNZ zcpEBGL{2l`p#?C%VYm#hP*c3&SD~sqzq_g+itGuijCpg)m8BL~(XUe3Nes{Z31^Mm zY-Q$H*}~4hbTc#Y;(Z+S_b!zp9I%RMaSTvRJ9}qFAz7d?2mo z1~1jd8^0Y9`*LTJfyJip+Q(YI&R1<`TApsmR5UGlNyAOVQ z8honp60CvnhC)m5#wIzns(aS!Cx`J!)lmPrmTrLFMICwfiGDvjo;TC-r273RNFXt|}MhOUEsUu2ZH`=kH z@|^m|a?;urLrTRf#<+%nSE>l{<8asApKxbe52_YfDI}gF6BXc!uh!p=7mHoEZ|319 zxX-jF@iqlqz;%TS5rc4_4<_lzT!J5$oJFuU&2arzrQuUBE?|Hn^X@t3Zeb{igqLizsDVAQ8r+Bocl4JppQ@-9kxM<{P6R+pf^dpSDwf*>PMuMa%)5#_&_&ipFeSLwk zx;TJB>6|XwY^oQ%ARU28VP5u=g*n2EX*IDK_JvKAX{xR27<6O}ZJE9rBFk z>xu~->^5`6(cs4=NS4*uka&N;ocOg^oOzd#aEP|!hkC~gg6E;6*TDGwyWZmMa}S$c z_vwPP$I~Uc=c^8U2z(c>@pT<{9p{PnnqRE=eF<|LB7M#r(`p2Z+L1*;)TT&Ch@U^^#V zU$yfu){B2v2mg%Y{vTx4On^|Ckr`ky1|-*iQe!42nm@e)81Wg|{>$tPfaB9JGydg( z23RTpVhd)#3RcEHHf2UYzRkb}P((AZFav_=znsvl41my|p7}q;6Mzb*`4dC}d=oVE z%m7IJG&uo93AO8#21tTNCkb&>uXm6xv1?`%t6+iJO2@eFgF7W0AK@Xea zz9~pxh6(y8&O+cn#uK0zVAyY8Sa6w9pg>jFaJ6C9!ABVf2PER+}pO^CBZ5T|SWuhJ%HQd_L#y_22Kbg$pTz_jh z>RR4!x?x^sJS52mdx;dBJSF8%S`^_w7urA|#SLJ-uGU zhb!};Nli9gE;0LN{no?=D3M}Oa%u`oh0AWGFysr`N8eRCdsl@7SLSlc2j*MIxTfYF z$I;wpA5aKVP10mlSt_s5*t-;d;-1wS%+0uR2 zTGQ5=wr$(CZQGt}+O}=mwr$(Cap!*b+1>9xeNNy0bnmx_tjLPUsF78_8c+V8aifDc zRnR{8`Z}t5mvsJI-1QSukFzGbw%ua!?EzU*@5|=VSgAH=4Fx?+55ZRH;60U)*E$$j zD;oqfna+A=ymmfmR?oHO?FXZw=SnQu1&|~Ahxzf9nhaAH$Kz;D$a!0=WnCE(RVnS; zHdf+Q?Q2=-SQ&U|FN!(j*-vww3e9MV#1b1GUKW4TT8oE(?1|q*WB$!y#_&p6&FI>= z5OX^ESqLY|VohZUw(B4fdAe8!GgGN=#Nm4vd1^!W%*P>n*zYu~CNR6Y+eOKpLZVB~B%CBb6_zH>}aa zx$vY8=jJn#D+E|9#B96^NyM_-^~(jXKyBCkZxC?coe0jWFU&f4ARD9Lp|FA}-6Lto z1=Q;t7PE)DlU)d%(v_=%jRnwt=_m&>b{D<~tP&yeI+b0=^MvDf8$FQAeBT}00aL=n z>|GOVF^rq84Cd=06b5zY?=dJ0G+Z2996qq=`@%`B55iGULp!sLYUVTEX^c&;0DP(= z!J(&cV9wf*n6W7({mZ3n+M&ol6Gev zYquvi*0hvdkSk26%TNMtE({~2#f;on20P9dw4imsP$JC6c~K8>5sz|(5)XBV26e$F zg<0K^YdT>envM8l0)0@VvgGqLyy!=7HHH%lig!)QoVnb|m)pD@Y>}3>F#ou{#82(r zk8Zd7C2NG2%4Ig)NwkzJ)>+hqUs%c(jl3L=v=G%*`0kmzNGesNw>Sobji}z&TljOo z#BqQ0IdC~L2w|DSmJ!P7>wG2*87pnF4RIJeG*B^cD{&plKPbG-7)|LxB$UFLC!g;8 zM$76fXbw05Iv1u~xPbPgqV7FV8Mvk2W$k2?=(D@upZjo!{9XEjwz#K^7mtSI!>x;+ za0-3rG=td_oCB?pC;CV5&lm^6?ZB^7J&fDudTS+^9m^cjqvc%7uyF-5U7SyE%Tbj< z%*m!C-TADWJ&*q{>wCN+ngxg{exCf ze0aAdREpuRNzGYHHwB>Z8v_u#2#jj(*$-ulR}+fux1K0`jJLaVS`*?XJ?zhbTOlQS zjj`ntvSkcuO1eTRX(;yAW}B`Od(W=gq2x8;Y~*3hfY%eQ7X9oo z@~WFIXGlthNmCz>YA$5k0v^0gpR=WHs1IFoV5}UFT(1C0#_I1=iI}*~0h?G3;Cv3g zA{i23j=>cQrJz+GM-cA^zKM5HJBO_zM}8^N2!o8qJ4qxpFnrcE>E*-Kz`z*+mveG`Krn`e;buB!*hVsXqU8?CIX z#5#U!$xL$Dr_y;n;;s>)IBh=*q1cBS1ya}y{~ryE0a*(rh6Br(a!w+wo;r_MB@+7t zLbc>(9(N6PT(kNmM#YsN#!+1JOmC43+R4V`E26srl0>rSlaFZl5F|rH=N)7hI2a&6 zGRO6&wV2}HI>jtYtLO*f7~6AjRJAWwwZDn`#ndBi*=<{1Q{a8?2EYuXP)y5u&VLYB zB1Me4GzgyW#XV76CGp9Yq*EGg19Blog-R!spcH9@C8z~hHa+tYgxxfXZF)^VmL8SX zC0vfuXKdOs>mg+MTcVV8`kw4AdAv4}j<6`J27@dd^&c%0RL+yliwU&|%=9;SAT8#} zQcnBE_(`4;aZoRoT8A1~%qIo|?MltL@8WqNR12+TFRsxwXdRYST!k&{GS;cG`syB* zoz5?U-*_YS9o>p?bu4J)=#=k0gA}8fZ|jiyOzl~=YrD&={d$Q%l5ZK@)w@B=N;Yp1 zOUdOPB>zLJjB4o&Byf9BiJeJPLG1byIM}(NVLr%c>Xk$4T9-cva`O92 zmXnf^kkypu%QFPbxaDoI?VhW{ePCcv;A!97;3?R~Sfyz?dOMpZh_ic86H(-*^-f$< zNUtP^uoVJauA;Rq9LlhWF&>SI=^*Pwzgesx?My-%hST^fqNr$MZf=J({ar38A$9k* zRdcYBqiKw|P?F5(Ws$=JmY|lVdJU&52X?Ksc_aLHj+#WS|{($ioS$WY-{LZEG zbvs+b*~(SM4E)s3R~6D|POjX9sO?aN%#-EN1e=*~TQE%N<2{bTun=;K% zB3mlc+ysRQA=$UDx`vu#?{n= z-=e%|WIar!s?15OhJAB5(mpBv;Gr~1;9|cZf)-18JcYA_^kMS4aiRPQX;BLljI%># zbB0)ac7qI2>QY5YS8$&zsmW(2^+b%Cy4X 
zf59q(*#b0ohyn_&@=LM=f*U{c#UFV!zl_@qZBA?)_v--#mA9asJ z6+1PxcDcx)wJe>=NP|m);;=dv?nLA6*~MJh`o@*U4^jUmf48v}f_@imTU45gHeRmG z_-rH-OTHrWfY_ZaFsaq`%_>$~(;X$UKQfEPSi#x)0F+@*_a-vR8C2RC9B6 z_OXF7-A<^Y!plWi+kNCXQ@wnU^n#F3Dxsqtp~#sZ?%s>h#*T9+g0cz{F={95xAU?EI__+VG%fECP{ju zq+-Ik?bck}e9YtU7S$?gPIE24TuEt3$yHqXvUtj(lW=laTedf>_&2^w6WMTyb66VD zf_uND;9O{0`unQaT>LJuAW2IBE8&B2O;z&>^V-=klKXA`_dtfr_i&Mk&`O3VckBg+p^KFuJS7&J7_96u<}=2h~oF-E(T-8s!%w zRMK#{?R$RsffARKMqs%+`08!;bR&n2fI62=^+8kVOk00xmDxDTjFL3kyb?FL&m8;9 zg{+NUn63d^lm*t9i%GhNqJGq-W#2FS3SDw^{XM$gMiTOqn%F(1QCMX+-(^f%TpeG{ z7rT2JTW@1`6}yyu!q}DAlUUu0j<<>x!X^=`c!{L(#B`i*53$!UKM8FbOr2HgM|Zqv zyle>KuFd1#I6RJ46L=rIX5C=VAZbANK&Y8HjTSfHsGdq%H*oumAw*FiH?pYbe2zcK z+pcgV&zwMFL2FwHz3&J3wa3+i8Cv>jsQ~c&4rHffd zCQ0SDl0vFW+1Sz#A$REa*bZ6z498n%T!JpGXIm$4_xi8&ugtHmuUWB+CzAKAUA#{x zToGDP1TXQ9nKU0rf9U+yil}FfP9QXD&9yk_rK`E({0a#r=fYk_$5jX0_9^roGL7mk z4UM&8rGqD0g}YR-)5(I2RQQ?C{O-*-C6nd~>Lr=8DH}hP8da}`M!x@0w7O_WA!L4lD@_R?d1}ps1EJscy9v?iSf?k%T z&(K4C|1kuoGir66wKteVU?)D^MBl|OXUa;7DUc8u?J)!yX$its9ZVhSfT;6QlJqeI zrR?ODFg4ity1y!l%TvN_uU3EWIYn^~8_+hi1=q5iQ;Wu*$ zsh*#6?LuYYenR|jJM(;O>o=&xEnxDztUXBsbk;_XzL&&9{H zg2TY&~x z2jyslPn--!^QX;5?Uh(OG+nU3E%xC$&d{}Wd2t!4c$_%>pgM6GK?j_4T(70gMo@Yc z_t3ibChp3o$MZ8v$X8lrWoZv(A&-^EWkQ2PjA0EIno>+b_K|bDqF5aV3)cdkSri24Ce}07>z8hmV1ao0)NaFmlb_(1bjqH zLtwIJV84Kb@JMWuAC(oGei$g()H5(3Rx#P|6S0ejRW)K|A8!OrAvQ@Dx&_64L|cri zhvQD3V!-3pjsmADx*Z;3L_25n9Z-1(d7n-p?X4OkY`9F8U(0W7*t{mWUFz2DefAx~ zsvG`Rh=R~MwjEVDx}kaY9i?eoc^`a4-4@XmJuwA)$BBaYjNq!9algv8q6OJ`*X@>S zq|Fw<6~>Cdi~q8xBlc;>=+<*SwrhZLA3%kdi{u5?ijlj&uQ|g5&~g827^b|dKZg9q z+z#=QVQa{2gt0ly1M(@_{09Bjc7)i5x&`_v1g%pNe>A1noN(M z(y`QHpIGSQ%s22%sn*+uDErtl(w}fp`{X7BU;dK&WiFu>Jw~@vF01Pime(x3=nQz=z@P!_mb{Ytl#BTYBIUZAxYoOd7=y)Wa+iFV0sNe-_nmv-JrE#vrkyEcx&z> z$AZ+PariS2Q{ND|s@#HZ;&t`1?=QJ5z0*gEdHOvKTilXKHM3IOid%95Qc#E;pXw=N_R9Moo$9zk?C4Za9<$e<-^cO`uaZ(b zwas2VOHSSj{@(c_O!X|8zZWkkoJvULk~VvN|6b~X+#Ir~WYx<`s^^Or|6EmEs%OUh z{aHcb*7lM^SK$`~Lx-T5TiEh`O zx&-IEKHXcj=RQ(Dza;0J#U-~W8UHlR-)qXvIg3he5ifl!p?uSHsG)k<`#ILE8+z4a+It7c`~>tm9jF( zPLmXpA45~yUo)Qtg!lLV^=df`9SG#{Uo-F0;t>}7bBXxO_<8>v7}dMe_t$*23cn-J z7XEr~L!H*8wZB?WL*>1_|C|Nl>!%ZDaK?X4C)55_X8V7re+mDM)1(x2(6unt|4&@= zANAY+Rm=T)b;-bF#E?;|E7ceRf75N+~)sfFa9G9|NpfV z_J8CWzk$xb;cF~^lMennUX70K-}3UB(LH8;G?0H)E??ufMOf{_0TdF#4XVgn!RoF_ zn6F+IvW0ei8?idp`Gvm!_0?}ZQuxO_BLatew&PU`h?Yxk{5QC(Yf_vl;+j%G+*?kh`C~44P;QoS@@=X-=!n!mo^%B5MWi<2F1e zWNKokq@%-~%}s=)&nL(NWW33^q-ei<(8V2l>7F2gf;$LVC2PC?dRKhwg8!j={m;0= z|4TUhk9C%R#Nq$0v;4&h|NZm(+p7Mhv#`?rztV>^O#en7cBp|Vs4t~>K5>Q&B!4p% zq9*REynK8nKoy{{>?nT9<77zsNC2B>wrE3=2*fwB%}(t)=PV&j%Pd2-z8Qt4=|-uo ztfHw*Qgj)tnntT95``1_<`x8pU9r^x8R;Xe^hIv($CppqonKvFe|T_wap;0UKtqp< zEv#<1Q$BG-VT%FO!q8J8!iXNfZfJdRSY@tEv`*Du*JiNMv%iL9V4PqaBk9Nzyu3fb zin&*pSh>KrRC&C9iJho76)=C~eI4@MAo{{KJ_fP5M<18~k9&c?R(woXAH%ZidCjx-Q(PEFHBzUtsUqApsc7ypvTFAzxEdJi}3-pcaWXJsCP{GuqK zD}X310dx(*7RVPcPd~N>;4^eS`G@{w` z;UeOtfuWz6CJ6Z>e}&L-Qz3`o$m4QEo&W`vAd`2no~$L;I_yE}6Up375*WuCQ30R9 zJ4g_I>zlg;sfX`z^uvuL+J^`n3`CY!0YN-ODK}%8&x3z5!YqjB-aqCVjyIQSbq@8a z2UH^U&GQ^L_NWqW<% z_IHACMgC@?C-l_O9gIHI9wkvwOS)^QD}4!=HZ>ly>ds2n@%*=+TSK9xi2I)!58IpFvy;)UJNejaz&Z6#?95E|spUUY zEybO)!NN5MC@3IFAzS@ppbj5e?Gk(nuJ~Gopu{&9E-Adl!Ids4V9PLrW1`+lg5{7x z>X0JA33J))ELxMzZA96QwC9@A;jsGsu-Q78vOp-^TL1#gHITX8KbD)ZR};IFsj|Vc z*|K$UIO_wmy#T_m{1Mf1n1sUnFxIIF$NhiG1ZMTG%(CAVJdJm;cls9-plz4<&n~UJ zY_29BogJQPf$Nk<7iJ^f*BB2aOl!?!yKB|X1jDR)Qm2h?_gmFsqX2q8cDexz1 z-32_Sfp#MEFM@gO!Ds4r15RfAg*I4$e|`V62sf9x^%ZcOB)pQV>q^E$nQ|{9S--~_ zVcIpDHyfWv*}D_@${pkQmyB}zC(^V$kGuEct}Tzbx7M#!Pz7{sW!EZp{x#X`u$z`P zA1q)tz#{ zO(Rh@Tbu`^li~0klN)EuSbGzzp@-PZ+OzWbtGjvsv>N|I^? 
[GIT binary patch data omitted: base85-encoded binary literal, no human-readable content]

From caf38293001693253a516250d62784299096cc46 Mon Sep 17 00:00:00 2001
From: olegshmuelov <45327364+olegshmuelov@users.noreply.github.com>
Date: Wed, 13 Sep 2023 16:07:51 +0300
Subject: [PATCH 09/54] Fix: Use Share PublicKey for Share Removal (#1140)

* remove share & SP data with share pubkey fix

* move tx related logic to event handler + tests
---
 ekm/eth_key_manager_signer.go          |  25 +++++-
 ekm/signer_storage.go                  |  14 +++-
 eth/eventhandler/event_handler.go      |  12 ++-
 eth/eventhandler/event_handler_test.go | 112 +++++++++++++++++++++----
 eth/eventhandler/handlers.go           |  43 +++++-----
 eth/eventhandler/task.go               |  43 +++++-----
 eth/eventhandler/task_executor_test.go |   3 +-
 logging/fields/fields.go               |   5 ++
 operator/validator/controller.go       |  17 +---
 operator/validator/mocks/controller.go |   8 +-
 operator/validator/task_executor.go    |  37 +++-----
 11 files changed, 205 insertions(+), 114 deletions(-)

diff --git a/ekm/eth_key_manager_signer.go b/ekm/eth_key_manager_signer.go
index 21c663a5b1..3d7ff82cf8 100644
--- a/ekm/eth_key_manager_signer.go
+++ b/ekm/eth_key_manager_signer.go
@@ -43,9 +43,16 @@ type ethKeyManagerSigner struct {
 	builderProposals bool
 }
 
+// StorageProvider provides the underlying KeyManager storage.
+type StorageProvider interface { + ListAccounts() ([]core.ValidatorAccount, error) + RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) + RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) +} + // NewETHKeyManagerSigner returns a new instance of ethKeyManagerSigner func NewETHKeyManagerSigner(logger *zap.Logger, db basedb.Database, network networkconfig.NetworkConfig, builderProposals bool, encryptionKey string) (spectypes.KeyManager, error) { - signerStore := NewSignerStorage(db, network.Beacon.GetNetwork(), logger) + signerStore := NewSignerStorage(db, network.Beacon, logger) if encryptionKey != "" { err := signerStore.SetEncryptionKey(encryptionKey) if err != nil { @@ -85,6 +92,18 @@ func NewETHKeyManagerSigner(logger *zap.Logger, db basedb.Database, network netw }, nil } +func (km *ethKeyManagerSigner) ListAccounts() ([]core.ValidatorAccount, error) { + return km.storage.ListAccounts() +} + +func (km *ethKeyManagerSigner) RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) { + return km.storage.RetrieveHighestAttestation(pubKey) +} + +func (km *ethKeyManagerSigner) RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) { + return km.storage.RetrieveHighestProposal(pubKey) +} + func (km *ethKeyManagerSigner) SignBeaconObject(obj ssz.HashRoot, domain phase0.Domain, pk []byte, domainType phase0.DomainType) (spectypes.Signature, [32]byte, error) { sig, rootSlice, err := km.signBeaconObject(obj, domain, pk, domainType) if err != nil { @@ -260,7 +279,7 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return errors.Wrap(err, "could not check share existence") } if acc == nil { - currentSlot := km.storage.Network().EstimatedCurrentSlot() + currentSlot := km.storage.BeaconNetwork().EstimatedCurrentSlot() if err := km.saveMinimalSlashingProtection(shareKey.GetPublicKey().Serialize(), currentSlot); err != nil { return errors.Wrap(err, "could not save minimal slashing protection") } @@ -273,7 +292,7 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { } func (km *ethKeyManagerSigner) saveMinimalSlashingProtection(pk []byte, currentSlot phase0.Slot) error { - currentEpoch := km.storage.Network().EstimatedEpochAtSlot(currentSlot) + currentEpoch := km.storage.BeaconNetwork().EstimatedEpochAtSlot(currentSlot) highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance highestSource := highestTarget - 1 highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance diff --git a/ekm/signer_storage.go b/ekm/signer_storage.go index 5991e6f321..fc8eadd62e 100644 --- a/ekm/signer_storage.go +++ b/ekm/signer_storage.go @@ -47,17 +47,19 @@ type Storage interface { SetEncryptionKey(newKey string) error ListAccountsTxn(r basedb.Reader) ([]core.ValidatorAccount, error) SaveAccountTxn(rw basedb.ReadWriter, account core.ValidatorAccount) error + + BeaconNetwork() beacon.BeaconNetwork } type storage struct { db basedb.Database - network beacon.Network + network beacon.BeaconNetwork encryptionKey []byte logger *zap.Logger // struct logger is used because core.Storage does not support passing a logger lock sync.RWMutex } -func NewSignerStorage(db basedb.Database, network beacon.Network, logger *zap.Logger) Storage { +func NewSignerStorage(db basedb.Database, network beacon.BeaconNetwork, logger *zap.Logger) Storage { return &storage{ db: db, network: network, @@ -87,7 +89,7 @@ func (s *storage) DropRegistryData() error { } func (s *storage) objPrefix(obj 
string) []byte { - return []byte(string(s.network.BeaconNetwork) + obj) + return []byte(string(s.network.GetBeaconNetwork()) + obj) } // Name returns storage name. @@ -97,7 +99,7 @@ func (s *storage) Name() string { // Network returns the network storage is related to. func (s *storage) Network() core.Network { - return core.Network(s.network.BeaconNetwork) + return core.Network(s.network.GetBeaconNetwork()) } // SaveWallet stores the given wallet. @@ -406,3 +408,7 @@ func (s *storage) decrypt(data []byte) ([]byte, error) { nonce, ciphertext := data[:nonceSize], data[nonceSize:] return gcm.Open(nil, nonce, ciphertext, nil) } + +func (s *storage) BeaconNetwork() beacon.BeaconNetwork { + return s.network +} diff --git a/eth/eventhandler/event_handler.go b/eth/eventhandler/event_handler.go index 1c909caf88..b207c78a25 100644 --- a/eth/eventhandler/event_handler.go +++ b/eth/eventhandler/event_handler.go @@ -46,7 +46,7 @@ var ( type taskExecutor interface { StartValidator(share *ssvtypes.SSVShare) error - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient ethcommon.Address) error @@ -285,7 +285,7 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, return nil, nil } - sharePK, err := eh.handleValidatorRemoved(txn, validatorRemovedEvent) + validatorPubKey, err := eh.handleValidatorRemoved(txn, validatorRemovedEvent) if err != nil { eh.metrics.EventProcessingFailed(abiEvent.Name) @@ -298,13 +298,11 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, defer eh.metrics.EventProcessed(abiEvent.Name) - if sharePK == nil { - return nil, nil + if validatorPubKey != nil { + return NewStopValidatorTask(eh.taskExecutor, validatorPubKey), nil } - task := NewStopValidatorTask(eh.taskExecutor, validatorRemovedEvent.PublicKey) - - return task, nil + return nil, nil case ClusterLiquidated: clusterLiquidatedEvent, err := eh.eventParser.ParseClusterLiquidated(event) diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index bf1f96961e..15918a10e9 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -1,6 +1,7 @@ package eventhandler import ( + "bytes" "context" "crypto/rand" "crypto/rsa" @@ -12,13 +13,8 @@ import ( "strings" "testing" - "github.com/bloxapp/ssv/operator/validator" - "github.com/bloxapp/ssv/operator/validator/mocks" - "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/bloxapp/ssv/utils/blskeygen" - "github.com/pkg/errors" - + ekmcore "github.com/bloxapp/eth2-key-manager/core" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" ethcommon "github.com/ethereum/go-ethereum/common" @@ -27,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/golang/mock/gomock" "github.com/herumi/bls-eth-go-binary/bls" + "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -40,10 +37,14 @@ import ( ibftstorage "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/networkconfig" operatorstorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validator/mocks" 
"github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" registrystorage "github.com/bloxapp/ssv/registry/storage" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils/blskeygen" "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/utils/threshold" ) @@ -236,9 +237,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + + requireKeyManagerDataToExist(t, eh, 1, validatorData1) + // Check that validator was registered shares := eh.nodeStorage.Shares().List(nil) require.Equal(t, 1, len(shares)) @@ -285,10 +289,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToNotExist(t, eh, 1, validatorData2) + // Check that validator was not registered, shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 1, len(shares)) @@ -332,10 +338,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToExist(t, eh, 2, validatorData2) + // Check that validator was registered for op1, shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 2, len(shares)) @@ -351,7 +359,7 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) // Share for 1st operator is malformed; check nonce is bumped correctly; validator wasn't added - t.Run("test correct ValidatorAdded again and nonce is bumped", func(t *testing.T) { + t.Run("test malformed ValidatorAdded and nonce is bumped", func(t *testing.T) { malformedSharesData := sharesData3[:] operatorCount := len(ops) @@ -389,10 +397,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToNotExist(t, eh, 2, validatorData3) + // Check that validator was not registered shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 2, len(shares)) @@ -435,10 +445,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToExist(t, eh, 3, validatorData3) + // Check that validator was registered shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 3, len(shares)) @@ -476,10 +488,14 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() - lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) + requireKeyManagerDataToExist(t, eh, 3, validatorData1) + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, true) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) 
blockNum++ + + requireKeyManagerDataToNotExist(t, eh, 2, validatorData1) }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct @@ -584,9 +600,12 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op storageMap := ibftstorage.NewStores() nodeStorage, operatorData := setupOperatorStorage(logger, db, operator) - testNetworkConfig := networkconfig.TestNetwork - keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, testNetworkConfig, true, "") + mockNetworkConfig := networkconfig.NetworkConfig{ + Beacon: setupMockBeaconNetwork(t), + } + + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, mockNetworkConfig, true, "") if err != nil { return nil, nil, err } @@ -607,7 +626,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - testNetworkConfig.Domain, + mockNetworkConfig.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -644,7 +663,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - testNetworkConfig.Domain, + mockNetworkConfig.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -658,6 +677,20 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op return eh, nil, nil } +func setupMockBeaconNetwork(t *testing.T) *mocknetwork.MockBeaconNetwork { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockBeaconNetwork := mocknetwork.NewMockBeaconNetwork(ctrl) + + currentSlot := phase0.Slot(100) + mockBeaconNetwork.EXPECT().GetBeaconNetwork().Return(networkconfig.TestNetwork.Beacon.GetBeaconNetwork()).AnyTimes() + mockBeaconNetwork.EXPECT().EstimatedCurrentSlot().Return(currentSlot).AnyTimes() + mockBeaconNetwork.EXPECT().EstimatedEpochAtSlot(gomock.Any()).Return(phase0.Epoch(currentSlot / 32)).AnyTimes() + + return mockBeaconNetwork +} + func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *testOperator) (operatorstorage.Storage, *registrystorage.OperatorData) { if operator == nil { logger.Fatal("empty test operator was passed", zap.Error(fmt.Errorf("empty test operator was passed"))) @@ -774,6 +807,15 @@ type testShare struct { pub *bls.PublicKey } +func shareExist(accounts []ekmcore.ValidatorAccount, sharePubKey []byte) bool { + for _, acc := range accounts { + if bytes.Equal(acc.ValidatorPublicKey(), sharePubKey) { + return true + } + } + return false +} + func createNewValidator(ops []*testOperator) (*testValidatorData, error) { validatorData := &testValidatorData{} sharesCount := uint64(len(ops)) @@ -874,3 +916,37 @@ func generateSharesData(validatorData *testValidatorData, operators []*testOpera return sharesDataSigned, nil } + +func requireKeyManagerDataToExist(t *testing.T, eh *EventHandler, expectedAccounts int, validatorData *testValidatorData) { + sharePubKey := validatorData.operatorsShares[0].sec.GetPublicKey().Serialize() + accounts, err := eh.keyManager.(ekm.StorageProvider).ListAccounts() + require.NoError(t, err) + require.Equal(t, expectedAccounts, len(accounts)) + require.True(t, shareExist(accounts, sharePubKey)) + + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + + _, found, err = eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + 
require.True(t, found) +} + +func requireKeyManagerDataToNotExist(t *testing.T, eh *EventHandler, expectedAccounts int, validatorData *testValidatorData) { + sharePubKey := validatorData.operatorsShares[0].sec.GetPublicKey().Serialize() + accounts, err := eh.keyManager.(ekm.StorageProvider).ListAccounts() + require.NoError(t, err) + require.Equal(t, expectedAccounts, len(accounts)) + require.False(t, shareExist(accounts, sharePubKey)) + + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, highestAttestation) + + _, found, err = eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.False(t, found) +} diff --git a/eth/eventhandler/handlers.go b/eth/eventhandler/handlers.go index 7c25d7e6f4..7bc6e4dade 100644 --- a/eth/eventhandler/handlers.go +++ b/eth/eventhandler/handlers.go @@ -39,10 +39,10 @@ var ( func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.ContractOperatorAdded) error { logger := eh.logger.With( - zap.String("event_type", OperatorAdded), + fields.EventName(OperatorAdded), fields.TxHash(event.Raw.TxHash), fields.OperatorID(event.OperatorId), - zap.String("owner_address", event.Owner.String()), + fields.Owner(event.Owner), fields.OperatorPubKey(event.PublicKey), ) logger.Debug("processing event") @@ -85,7 +85,7 @@ func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.Cont func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.ContractOperatorRemoved) error { logger := eh.logger.With( - zap.String("event_type", OperatorRemoved), + fields.EventName(OperatorRemoved), fields.TxHash(event.Raw.TxHash), fields.OperatorID(event.OperatorId), ) @@ -101,8 +101,8 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co } logger = logger.With( - zap.String("operator_pub_key", ethcommon.Bytes2Hex(od.PublicKey)), - zap.String("owner_address", od.OwnerAddress.String()), + fields.OperatorPubKey(od.PublicKey), + fields.Owner(od.OwnerAddress), ) // TODO: In original handler we didn't delete operator data, so this behavior was preserved. However we likely need to. 
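
(Editor's note: the sketch below is illustrative and not part of the patch.)
These handler hunks replace ad-hoc zap.String fields with the typed helpers
from logging/fields. The OperatorIDs helper added later in this patch is shown
verbatim; the Owner helper is assumed here to follow the same shape, and both
constants are defined alongside in fields.go:

	package fields

	import (
		spectypes "github.com/bloxapp/ssv-spec/types"
		ethcommon "github.com/ethereum/go-ethereum/common"
		"go.uber.org/zap"
	)

	// OperatorIDs wraps a slice of operator IDs as a uint64s field.
	func OperatorIDs(operatorIDs []spectypes.OperatorID) zap.Field {
		return zap.Uint64s(FieldOperatorIDs, operatorIDs)
	}

	// Owner (assumed sketch) wraps the owner address as a string field.
	func Owner(addr ethcommon.Address) zap.Field {
		return zap.String(FieldOwnerAddress, addr.String())
	}
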
@@ -124,10 +124,10 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co func (eh *EventHandler) handleValidatorAdded(txn basedb.Txn, event *contract.ContractValidatorAdded) (ownShare *ssvtypes.SSVShare, err error) { logger := eh.logger.With( - zap.String("event_type", ValidatorAdded), + fields.EventName(ValidatorAdded), fields.TxHash(event.Raw.TxHash), fields.Owner(event.Owner), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.OperatorIDs(event.OperatorIds), fields.Validator(event.PublicKey), ) @@ -324,12 +324,12 @@ func validatorAddedEventToShare( return &validatorShare, shareSecret, nil } -func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.ContractValidatorRemoved) ([]byte, error) { +func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.ContractValidatorRemoved) (spectypes.ValidatorPK, error) { logger := eh.logger.With( - zap.String("event_type", ValidatorRemoved), + fields.EventName(ValidatorRemoved), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), fields.PubKey(event.PublicKey), ) logger.Debug("processing event") @@ -372,6 +372,11 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C logger = logger.With(zap.String("validator_pubkey", hex.EncodeToString(share.ValidatorPubKey))) } if isOperatorShare { + err = eh.keyManager.RemoveShare(hex.EncodeToString(share.SharePubKey)) + if err != nil { + return nil, fmt.Errorf("could not remove share from ekm storage: %w", err) + } + eh.metrics.ValidatorRemoved(event.PublicKey) logger.Debug("processed event") return share.ValidatorPubKey, nil @@ -383,10 +388,10 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C func (eh *EventHandler) handleClusterLiquidated(txn basedb.Txn, event *contract.ContractClusterLiquidated) ([]*ssvtypes.SSVShare, error) { logger := eh.logger.With( - zap.String("event_type", ClusterLiquidated), + fields.EventName(ClusterLiquidated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), ) logger.Debug("processing event") @@ -405,10 +410,10 @@ func (eh *EventHandler) handleClusterLiquidated(txn basedb.Txn, event *contract. 
func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract.ContractClusterReactivated) ([]*ssvtypes.SSVShare, error) { logger := eh.logger.With( - zap.String("event_type", ClusterReactivated), + fields.EventName(ClusterReactivated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), ) logger.Debug("processing event") @@ -427,9 +432,9 @@ func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract func (eh *EventHandler) handleFeeRecipientAddressUpdated(txn basedb.Txn, event *contract.ContractFeeRecipientAddressUpdated) (bool, error) { logger := eh.logger.With( - zap.String("event_type", FeeRecipientAddressUpdated), + fields.EventName(FeeRecipientAddressUpdated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), + fields.Owner(event.Owner), fields.FeeRecipient(event.RecipientAddress.Bytes()), ) logger.Debug("processing event") diff --git a/eth/eventhandler/task.go b/eth/eventhandler/task.go index 3e825140b8..f6e2894fa8 100644 --- a/eth/eventhandler/task.go +++ b/eth/eventhandler/task.go @@ -1,9 +1,10 @@ package eventhandler import ( + spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" - ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/protocol/v2/types" ) type Task interface { @@ -11,15 +12,15 @@ type Task interface { } type startValidatorExecutor interface { - StartValidator(share *ssvtypes.SSVShare) error + StartValidator(share *types.SSVShare) error } type StartValidatorTask struct { executor startValidatorExecutor - share *ssvtypes.SSVShare + share *types.SSVShare } -func NewStartValidatorTask(executor startValidatorExecutor, share *ssvtypes.SSVShare) *StartValidatorTask { +func NewStartValidatorTask(executor startValidatorExecutor, share *types.SSVShare) *StartValidatorTask { return &StartValidatorTask{ executor: executor, share: share, @@ -31,41 +32,41 @@ func (t StartValidatorTask) Execute() error { } type stopValidatorExecutor interface { - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error } type StopValidatorTask struct { - executor stopValidatorExecutor - publicKey []byte + executor stopValidatorExecutor + pubKey spectypes.ValidatorPK } -func NewStopValidatorTask(executor stopValidatorExecutor, publicKey []byte) *StopValidatorTask { +func NewStopValidatorTask(executor stopValidatorExecutor, pubKey spectypes.ValidatorPK) *StopValidatorTask { return &StopValidatorTask{ - executor: executor, - publicKey: publicKey, + executor: executor, + pubKey: pubKey, } } func (t StopValidatorTask) Execute() error { - return t.executor.StopValidator(t.publicKey) + return t.executor.StopValidator(t.pubKey) } type liquidateClusterExecutor interface { - LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error + LiquidateCluster(owner ethcommon.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error } type LiquidateClusterTask struct { executor liquidateClusterExecutor owner ethcommon.Address - operatorIDs []uint64 - toLiquidate []*ssvtypes.SSVShare + operatorIDs []spectypes.OperatorID + toLiquidate []*types.SSVShare } func NewLiquidateClusterTask( executor liquidateClusterExecutor, owner ethcommon.Address, - operatorIDs []uint64, - toLiquidate []*ssvtypes.SSVShare, + operatorIDs 
[]spectypes.OperatorID, + toLiquidate []*types.SSVShare, ) *LiquidateClusterTask { return &LiquidateClusterTask{ executor: executor, @@ -80,21 +81,21 @@ func (t LiquidateClusterTask) Execute() error { } type reactivateClusterExecutor interface { - ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error + ReactivateCluster(owner ethcommon.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error } type ReactivateClusterTask struct { executor reactivateClusterExecutor owner ethcommon.Address - operatorIDs []uint64 - toReactivate []*ssvtypes.SSVShare + operatorIDs []spectypes.OperatorID + toReactivate []*types.SSVShare } func NewReactivateClusterTask( executor reactivateClusterExecutor, owner ethcommon.Address, - operatorIDs []uint64, - toReactivate []*ssvtypes.SSVShare, + operatorIDs []spectypes.OperatorID, + toReactivate []*types.SSVShare, ) *ReactivateClusterTask { return &ReactivateClusterTask{ executor: executor, diff --git a/eth/eventhandler/task_executor_test.go b/eth/eventhandler/task_executor_test.go index 8792aadc91..e692981f42 100644 --- a/eth/eventhandler/task_executor_test.go +++ b/eth/eventhandler/task_executor_test.go @@ -3,9 +3,10 @@ package eventhandler import ( "context" "encoding/binary" - "github.com/golang/mock/gomock" "testing" + "github.com/golang/mock/gomock" + spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" diff --git a/logging/fields/fields.go b/logging/fields/fields.go index 6b1de4ffc5..3584f07915 100644 --- a/logging/fields/fields.go +++ b/logging/fields/fields.go @@ -64,6 +64,7 @@ const ( FieldName = "name" FieldNetwork = "network" FieldOperatorId = "operator_id" + FieldOperatorIDs = "operator_ids" FieldOperatorPubKey = "operator_pubkey" FieldOwnerAddress = "owner_address" FieldPeerID = "peer_id" @@ -190,6 +191,10 @@ func OperatorID(operatorId spectypes.OperatorID) zap.Field { return zap.Uint64(FieldOperatorId, operatorId) } +func OperatorIDs(operatorIDs []spectypes.OperatorID) zap.Field { + return zap.Uint64s(FieldOperatorIDs, operatorIDs) +} + func OperatorIDStr(operatorId string) zap.Field { return zap.String(FieldOperatorId, operatorId) } diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 1ec3efa634..eb43107100 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -104,7 +104,7 @@ type Controller interface { IndicesChangeChan() chan struct{} StartValidator(share *ssvtypes.SSVShare) error - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient common.Address) error @@ -645,24 +645,15 @@ func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.Validator } } -// onShareRemove is called when a validator was removed -// TODO: think how we can make this function atomic (i.e. 
failing wouldn't stop the removal of the share) -func (c *controller) onShareRemove(pk string, removeSecret bool) error { +// onShareStop is called when a validator was removed or liquidated +func (c *controller) onShareStop(pubKey spectypes.ValidatorPK) { // remove from validatorsMap - v := c.validatorsMap.RemoveValidator(pk) + v := c.validatorsMap.RemoveValidator(hex.EncodeToString(pubKey)) // stop instance if v != nil { v.Stop() } - // remove the share secret from key-manager - if removeSecret { - if err := c.keyManager.RemoveShare(pk); err != nil { - return errors.Wrap(err, "could not remove share secret from key manager") - } - } - - return nil } func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index 6b743f6747..601d72a936 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -219,17 +219,17 @@ func (mr *MockControllerMockRecorder) StartValidators() *gomock.Call { } // StopValidator mocks base method. -func (m *MockController) StopValidator(publicKey []byte) error { +func (m *MockController) StopValidator(pubKey types.ValidatorPK) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopValidator", publicKey) + ret := m.ctrl.Call(m, "StopValidator", pubKey) ret0, _ := ret[0].(error) return ret0 } // StopValidator indicates an expected call of StopValidator. -func (mr *MockControllerMockRecorder) StopValidator(publicKey interface{}) *gomock.Call { +func (mr *MockControllerMockRecorder) StopValidator(pubKey interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), publicKey) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), pubKey) } // UpdateFeeRecipient mocks base method. diff --git a/operator/validator/task_executor.go b/operator/validator/task_executor.go index 0ea2191716..5fd3a4c27b 100644 --- a/operator/validator/task_executor.go +++ b/operator/validator/task_executor.go @@ -1,17 +1,17 @@ package validator import ( - "encoding/hex" "fmt" "time" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/ethereum/go-ethereum/common" "go.uber.org/multierr" "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/protocol/v2/types" ) func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logger { @@ -20,7 +20,7 @@ func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logge With(fields...) 
}
 
-func (c *controller) StartValidator(share *ssvtypes.SSVShare) error {
+func (c *controller) StartValidator(share *types.SSVShare) error {
 	// logger := c.taskLogger("StartValidator", fields.PubKey(share.ValidatorPubKey))
 
 	// Since we don't yet have the Beacon metadata for this validator,
@@ -30,41 +30,30 @@ func (c *controller) StartValidator(share *ssvtypes.SSVShare) error {
 	return nil
 }
 
-func (c *controller) StopValidator(publicKey []byte) error {
-	logger := c.taskLogger("StopValidator", fields.PubKey(publicKey))
+func (c *controller) StopValidator(pubKey spectypes.ValidatorPK) error {
+	logger := c.taskLogger("StopValidator", fields.PubKey(pubKey))
 
-	c.metrics.ValidatorRemoved(publicKey)
-	if err := c.onShareRemove(hex.EncodeToString(publicKey), true); err != nil {
-		return err
-	}
+	c.metrics.ValidatorRemoved(pubKey)
+	c.onShareStop(pubKey)
 
 	logger.Info("removed validator")
 
 	return nil
 }
 
-func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error {
-	logger := c.taskLogger("LiquidateCluster",
-		zap.String("owner", owner.String()),
-		zap.Uint64s("operator_ids", operatorIDs))
+func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error {
+	logger := c.taskLogger("LiquidateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs))
 
 	for _, share := range toLiquidate {
-		// we can't remove the share secret from key-manager
-		// due to the fact that after activating the validators (ClusterReactivated)
-		// we don't have the encrypted keys to decrypt the secret, but only the owner address
-		if err := c.onShareRemove(hex.EncodeToString(share.ValidatorPubKey), false); err != nil {
-			return err
-		}
-		logger.With(fields.PubKey(share.ValidatorPubKey)).Debug("removed share")
+		c.onShareStop(share.ValidatorPubKey)
+		logger.With(fields.PubKey(share.ValidatorPubKey)).Debug("liquidated share")
 	}
 
 	return nil
 }
 
-func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error {
-	logger := c.taskLogger("ReactivateCluster",
-		zap.String("owner", owner.String()),
-		zap.Uint64s("operator_ids", operatorIDs))
+func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error {
+	logger := c.taskLogger("ReactivateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs))
 
 	var startedValidators int
 	var errs error

From b2a8ec4e7d289abc4fd1b62b55f072c15f9ed5ae Mon Sep 17 00:00:00 2001
From: guym-blox <83158283+guym-blox@users.noreply.github.com>
Date: Mon, 18 Sep 2023 14:20:44 +0300
Subject: [PATCH 10/54] fix generate operator keys command (#1142)

* fix generate operator keys command

* add #nosec G304 for file reading

* extract I/O operations to separate functions

* switch logic to rsa package functions

* clean code and add public key to keystore
---
 cli/generate_operator_keys.go | 152 +++++++++++++++++-----------------
 1 file changed, 74 insertions(+), 78 deletions(-)

diff --git a/cli/generate_operator_keys.go b/cli/generate_operator_keys.go
index d9a4a4b7a8..1a4c85fd16 100644
--- a/cli/generate_operator_keys.go
+++ b/cli/generate_operator_keys.go
@@ -1,116 +1,61 @@
 package cli
 
 import (
-	"crypto/x509"
 	"encoding/base64"
 	"encoding/json"
-	"encoding/pem"
-
-	keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4"
-
+	"log"
 	"os"
 	"path/filepath"
 
 	"github.com/bloxapp/ssv/logging"
 	"github.com/bloxapp/ssv/utils/rsaencryption"
"github.com/spf13/cobra" + "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" "go.uber.org/zap" ) -// generateOperatorKeysCmd is the command to generate operator private/public keys var generateOperatorKeysCmd = &cobra.Command{ Use: "generate-operator-keys", Short: "generates ssv operator keys", Run: func(cmd *cobra.Command, args []string) { - logger := zap.L().Named(RootCmd.Short) + if err := logging.SetGlobalLogger("debug", "capital", "console", nil); err != nil { + log.Fatal(err) + } + logger := zap.L().Named(logging.NameExportKeys) passwordFilePath, _ := cmd.Flags().GetString("password-file") privateKeyFilePath, _ := cmd.Flags().GetString("operator-key-file") - pk, sk, err := rsaencryption.GenerateKeys() - if err != nil && privateKeyFilePath == "" { - logger.Fatal("Failed to create key and operator key wasn't provided", zap.Error(err)) - } - // Resolve to absolute path - passwordAbsPath, err := filepath.Abs(passwordFilePath) - if err != nil { - logger.Fatal("Failed to read absolute path of password file", zap.Error(err)) - } - - // Now read the file - // #nosec G304 - passwordBytes, err := os.ReadFile(passwordAbsPath) + pk, sk, err := rsaencryption.GenerateKeys() if err != nil { - logger.Fatal("Failed to read password file", zap.Error(err)) + logger.Fatal("Failed to generate keys", zap.Error(err)) } - encryptionPassword := string(passwordBytes) - if privateKeyFilePath != "" { - // Resolve to absolute path - privateKeyAbsPath, err := filepath.Abs(privateKeyFilePath) + keyBytes, err := readFile(privateKeyFilePath) if err != nil { - logger.Fatal("Failed to read absolute path of private key file", zap.Error(err)) + logger.Fatal("Failed to read private key from file", zap.Error(err)) } - - // Now read the file - // #nosec G304 - privateKeyBytes, _ := os.ReadFile(privateKeyAbsPath) - if privateKeyBytes != nil { - keyBytes, err := base64.StdEncoding.DecodeString(string(privateKeyBytes)) - if err != nil { - logger.Fatal("base64 decoding failed", zap.Error(err)) - } - - keyPem, _ := pem.Decode(keyBytes) - if keyPem == nil { - logger.Fatal("failed to decode PEM", zap.Error(err)) - } - - rsaKey, err := x509.ParsePKCS1PrivateKey(keyPem.Bytes) - if err != nil { - logger.Fatal("failed to parse RSA private key", zap.Error(err)) - } - - skPem := pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(rsaKey), - }, - ) - - operatorPublicKey, _ := rsaencryption.ExtractPublicKey(rsaKey) - publicKey, _ := base64.StdEncoding.DecodeString(operatorPublicKey) - sk = skPem - pk = publicKey + sk, pk, err = parsePrivateKey(keyBytes) + if err != nil { + logger.Fatal("Failed to read private key from file", zap.Error(err)) } } - if err := logging.SetGlobalLogger("debug", "capital", "console", nil); err != nil { - logger.Fatal("", zap.Error(err)) - } - - if err != nil { - logger.Fatal("Failed to generate operator keys", zap.Error(err)) - } - logger.Info("generated public key (base64)", zap.String("pk", base64.StdEncoding.EncodeToString(pk))) - - if encryptionPassword != "" { - encryptedData, err := keystorev4.New().Encrypt(sk, encryptionPassword) + if passwordFilePath != "" { + passwordBytes, err := readFile(passwordFilePath) if err != nil { - logger.Fatal("Failed to encrypt private key", zap.Error(err)) + logger.Fatal("Failed to read password file", zap.Error(err)) } - - encryptedJSON, err := json.Marshal(encryptedData) - if err != nil { - logger.Fatal("Failed to marshal encrypted data to JSON", zap.Error(err)) + encryptedJSON, encryptedJSONErr := 
encryptPrivateKey(sk, pk, passwordBytes) + if encryptedJSONErr != nil { + logger.Fatal("Failed to encrypt private key", zap.Error(err)) } - - err = os.WriteFile("encrypted_private_key.json", encryptedJSON, 0600) + err = writeFile("encrypted_private_key.json", encryptedJSON) if err != nil { - logger.Fatal("Failed to write encrypted private key to file", zap.Error(err)) + logger.Fatal("Failed to save private key", zap.Error(err)) + } else { + logger.Info("private key encrypted and stored in encrypted_private_key.json") } - - logger.Info("private key encrypted and stored in encrypted_private_key.json") } else { logger.Info("generated public key (base64)", zap.String("pk", base64.StdEncoding.EncodeToString(pk))) logger.Info("generated private key (base64)", zap.String("sk", base64.StdEncoding.EncodeToString(sk))) @@ -118,6 +63,57 @@ var generateOperatorKeysCmd = &cobra.Command{ }, } +func parsePrivateKey(keyBytes []byte) ([]byte, []byte, error) { + decodedBytes, err := base64.StdEncoding.DecodeString(string(keyBytes)) + if err != nil { + return nil, nil, err + } + rsaKey, err := rsaencryption.ConvertPemToPrivateKey(string(decodedBytes)) + if err != nil { + return nil, nil, err + } + + skPem := rsaencryption.PrivateKeyToByte(rsaKey) + + operatorPublicKey, err := rsaencryption.ExtractPublicKey(rsaKey) + if err != nil { + return nil, nil, err + } + pk, err := base64.StdEncoding.DecodeString(operatorPublicKey) + if err != nil { + return nil, nil, err + } + return skPem, pk, nil +} + +func encryptPrivateKey(sk []byte, pk []byte, passwordBytes []byte) ([]byte, error) { + encryptionPassword := string(passwordBytes) + encryptedData, err := keystorev4.New().Encrypt(sk, encryptionPassword) + if err != nil { + return nil, err + } + encryptedData["publicKey"] = base64.StdEncoding.EncodeToString(pk) + encryptedJSON, err := json.Marshal(encryptedData) + if err != nil { + return nil, err + } + return encryptedJSON, nil +} + +func writeFile(fileName string, data []byte) error { + return os.WriteFile(fileName, data, 0600) +} + +func readFile(filePath string) ([]byte, error) { + absPath, err := filepath.Abs(filePath) + if err != nil { + return nil, err + } + // #nosec G304 + contentBytes, err := os.ReadFile(absPath) + return contentBytes, err +} + func init() { generateOperatorKeysCmd.Flags().StringP("password-file", "p", "", "File path to the password used to encrypt the private key") generateOperatorKeysCmd.Flags().StringP("operator-key-file", "o", "", "File path to the operator private key") From b2a8ec4e7d289abc4fd1b62b55f072c15f9ed5ae Mon Sep 17 00:00:00 2001 From: olegshmuelov <45327364+olegshmuelov@users.noreply.github.com> Date: Mon, 18 Sep 2023 15:40:29 +0300 Subject: [PATCH 11/54] Implement Slashing Protection Data Update for Reactivated Validators (#1144) --- ekm/eth_key_manager_signer.go | 142 ++++++++++++++----- ekm/signer_key_manager_test.go | 55 +++++--- eth/eventhandler/event_handler_test.go | 186 ++++++++++++++++++++----- eth/eventhandler/handlers.go | 8 ++ eth/eventhandler/local_events_test.go | 4 +- eth/eventhandler/task_executor_test.go | 4 +- utils/testutils.go | 55 ++++++++ 7 files changed, 360 insertions(+), 94 deletions(-) create mode 100644 utils/testutils.go diff --git a/ekm/eth_key_manager_signer.go b/ekm/eth_key_manager_signer.go index 3d7ff82cf8..6d4f098e00 100644 --- a/ekm/eth_key_manager_signer.go +++ b/ekm/eth_key_manager_signer.go @@ -29,9 +29,16 @@ import ( "github.com/bloxapp/ssv/storage/basedb" ) -// minimal att&block epoch/slot distance to protect slashing -var 
minimalAttSlashingProtectionEpochDistance = phase0.Epoch(0) -var minimalBlockSlashingProtectionSlotDistance = phase0.Slot(0) +const ( + // minSPAttestationEpochGap is the minimum epoch distance used for slashing protection in attestations. + // It defines the smallest allowable gap between the source and target epochs in an existing attestation + // and those in a new attestation, helping to prevent slashable offenses. + minSPAttestationEpochGap = phase0.Epoch(0) + // minSPProposalSlotGap is the minimum slot distance used for slashing protection in block proposals. + // It defines the smallest allowable gap between the current slot and the slot of a new block proposal, + // helping to prevent slashable offenses. + minSPProposalSlotGap = phase0.Slot(0) +) type ethKeyManagerSigner struct { wallet core.Wallet @@ -48,6 +55,7 @@ type StorageProvider interface { ListAccounts() ([]core.ValidatorAccount, error) RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) + BumpSlashingProtection(pubKey []byte) error } // NewETHKeyManagerSigner returns a new instance of ethKeyManagerSigner @@ -279,9 +287,8 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return errors.Wrap(err, "could not check share existence") } if acc == nil { - currentSlot := km.storage.BeaconNetwork().EstimatedCurrentSlot() - if err := km.saveMinimalSlashingProtection(shareKey.GetPublicKey().Serialize(), currentSlot); err != nil { - return errors.Wrap(err, "could not save minimal slashing protection") + if err := km.BumpSlashingProtection(shareKey.GetPublicKey().Serialize()); err != nil { + return errors.Wrap(err, "could not bump slashing protection") } if err := km.saveShare(shareKey); err != nil { return errors.Wrap(err, "could not save share") @@ -291,23 +298,6 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return nil } -func (km *ethKeyManagerSigner) saveMinimalSlashingProtection(pk []byte, currentSlot phase0.Slot) error { - currentEpoch := km.storage.BeaconNetwork().EstimatedEpochAtSlot(currentSlot) - highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance - highestSource := highestTarget - 1 - highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance - - minAttData := minimalAttProtectionData(highestSource, highestTarget) - - if err := km.storage.SaveHighestAttestation(pk, minAttData); err != nil { - return errors.Wrapf(err, "could not save minimal highest attestation for %s", string(pk)) - } - if err := km.storage.SaveHighestProposal(pk, highestProposal); err != nil { - return errors.Wrapf(err, "could not save minimal highest proposal for %s", string(pk)) - } - return nil -} - func (km *ethKeyManagerSigner) RemoveShare(pubKey string) error { km.walletLock.Lock() defer km.walletLock.Unlock() @@ -334,28 +324,110 @@ func (km *ethKeyManagerSigner) RemoveShare(pubKey string) error { return nil } -func (km *ethKeyManagerSigner) saveShare(shareKey *bls.SecretKey) error { - key, err := core.NewHDKeyFromPrivateKey(shareKey.Serialize(), "") +// BumpSlashingProtection updates the slashing protection data for a given public key. +func (km *ethKeyManagerSigner) BumpSlashingProtection(pubKey []byte) error { + currentSlot := km.storage.BeaconNetwork().EstimatedCurrentSlot() + + // Update highest attestation data for slashing protection. 
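+	// (Editor's note, not in the original patch.) The bump is monotonic: each
+	// helper below only raises the stored record when the minimal safe values
+	// derived from the current slot exceed what is already stored, which is
+	// the "storedEpoch = max(nextEpoch, storedEpoch)" behavior the event
+	// handler tests assert for reactivated validators.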
+ if err := km.updateHighestAttestation(pubKey, currentSlot); err != nil { + return err + } + + // Update highest proposal data for slashing protection. + if err := km.updateHighestProposal(pubKey, currentSlot); err != nil { + return err + } + + return nil +} + +// updateHighestAttestation updates the highest attestation data for slashing protection. +func (km *ethKeyManagerSigner) updateHighestAttestation(pubKey []byte, slot phase0.Slot) error { + // Retrieve the highest attestation data stored for the given public key. + retrievedHighAtt, found, err := km.RetrieveHighestAttestation(pubKey) if err != nil { - return errors.Wrap(err, "could not generate HDKey") + return fmt.Errorf("could not retrieve highest attestation: %w", err) } - account := wallets.NewValidatorAccount("", key, nil, "", nil) - if err := km.wallet.AddValidatorAccount(account); err != nil { - return errors.Wrap(err, "could not save new account") + + currentEpoch := km.storage.BeaconNetwork().EstimatedEpochAtSlot(slot) + minimalSP := km.computeMinimalAttestationSP(currentEpoch) + + // Check if the retrieved highest attestation data is valid and not outdated. + if found && retrievedHighAtt != nil { + if retrievedHighAtt.Source.Epoch >= minimalSP.Source.Epoch || retrievedHighAtt.Target.Epoch >= minimalSP.Target.Epoch { + return nil + } + } + + // At this point, either the retrieved attestation data was not found, or it was outdated. + // In either case, we update it to the minimal slashing protection data. + if err := km.storage.SaveHighestAttestation(pubKey, minimalSP); err != nil { + return fmt.Errorf("could not save highest attestation: %w", err) } + return nil } -func minimalAttProtectionData(source, target phase0.Epoch) *phase0.AttestationData { +// updateHighestProposal updates the highest proposal slot for slashing protection. +func (km *ethKeyManagerSigner) updateHighestProposal(pubKey []byte, slot phase0.Slot) error { + // Retrieve the highest proposal slot stored for the given public key. + retrievedHighProp, found, err := km.RetrieveHighestProposal(pubKey) + if err != nil { + return fmt.Errorf("could not retrieve highest proposal: %w", err) + } + + minimalSPSlot := km.computeMinimalProposerSP(slot) + + // Check if the retrieved highest proposal slot is valid and not outdated. + if found && retrievedHighProp != 0 { + if retrievedHighProp >= minimalSPSlot { + return nil + } + } + + // At this point, either the retrieved proposal slot was not found, or it was outdated. + // In either case, we update it to the minimal slashing protection slot. + if err := km.storage.SaveHighestProposal(pubKey, minimalSPSlot); err != nil { + return fmt.Errorf("could not save highest proposal: %w", err) + } + + return nil +} + +// computeMinimalAttestationSP calculates the minimal safe attestation data for slashing protection. +// It takes the current epoch as an argument and returns an AttestationData object with the minimal safe source and target epochs. +func (km *ethKeyManagerSigner) computeMinimalAttestationSP(epoch phase0.Epoch) *phase0.AttestationData { + // Calculate the highest safe target epoch based on the current epoch and a predefined minimum distance. + highestTarget := epoch + minSPAttestationEpochGap + // The highest safe source epoch is one less than the highest target epoch. + highestSource := highestTarget - 1 + + // Return a new AttestationData object with the calculated source and target epochs. 
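+	// (Editor's note, not in the original patch.) Worked example: with
+	// minSPAttestationEpochGap = 0 and a current epoch of 100, highestTarget
+	// is 100 and highestSource is 99, matching what the tests assert via
+	// EstimatedEpochAtSlot(slot) and EstimatedEpochAtSlot(slot)-1.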
return &phase0.AttestationData{ - BeaconBlockRoot: [32]byte{}, Source: &phase0.Checkpoint{ - Epoch: source, - Root: [32]byte{}, + Epoch: highestSource, }, Target: &phase0.Checkpoint{ - Epoch: target, - Root: [32]byte{}, + Epoch: highestTarget, }, } } + +// computeMinimalProposerSP calculates the minimal safe slot for a block proposal to avoid slashing. +// It takes the current slot as an argument and returns the minimal safe slot. +func (km *ethKeyManagerSigner) computeMinimalProposerSP(slot phase0.Slot) phase0.Slot { + // Calculate the highest safe proposal slot based on the current slot and a predefined minimum distance. + return slot + minSPProposalSlotGap +} + +func (km *ethKeyManagerSigner) saveShare(shareKey *bls.SecretKey) error { + key, err := core.NewHDKeyFromPrivateKey(shareKey.Serialize(), "") + if err != nil { + return errors.Wrap(err, "could not generate HDKey") + } + account := wallets.NewValidatorAccount("", key, nil, "", nil) + if err := km.wallet.AddValidatorAccount(account); err != nil { + return errors.Wrap(err, "could not save new account") + } + return nil +} diff --git a/ekm/signer_key_manager_test.go b/ekm/signer_key_manager_test.go index 4efe2c4fb3..65cf5df24c 100644 --- a/ekm/signer_key_manager_test.go +++ b/ekm/signer_key_manager_test.go @@ -7,26 +7,25 @@ import ( "encoding/hex" "testing" - "github.com/bloxapp/eth2-key-manager/core" - "github.com/bloxapp/eth2-key-manager/wallets/hd" - "github.com/bloxapp/ssv/utils/rsaencryption" - - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/storage/basedb" - "github.com/attestantio/go-eth2-client/spec/altair" "github.com/attestantio/go-eth2-client/spec/bellatrix" "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/bloxapp/eth2-key-manager/core" + "github.com/bloxapp/eth2-key-manager/wallets/hd" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/utils/threshold" "github.com/herumi/bls-eth-go-binary/bls" + "github.com/pkg/errors" "github.com/prysmaticlabs/go-bitfield" "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/utils" + "github.com/bloxapp/ssv/utils/rsaencryption" + "github.com/bloxapp/ssv/utils/threshold" ) const ( @@ -36,7 +35,7 @@ const ( pk2Str = "8796fafa576051372030a75c41caafea149e4368aebaca21c9f90d9974b3973d5cee7d7874e4ec9ec59fb2c8945b3e01" ) -func testKeyManager(t *testing.T) spectypes.KeyManager { +func testKeyManager(t *testing.T, network *networkconfig.NetworkConfig) spectypes.KeyManager { threshold.Init() logger := logging.TestLogger(t) @@ -44,7 +43,14 @@ func testKeyManager(t *testing.T) spectypes.KeyManager { db, err := getBaseStorage(logger) require.NoError(t, err) - km, err := NewETHKeyManagerSigner(logger, db, networkconfig.TestNetwork, true, "") + if network == nil { + network = &networkconfig.NetworkConfig{ + Beacon: utils.SetupMockBeaconNetwork(t, nil), + Domain: networkconfig.TestNetwork.Domain, + } + } + + km, err := NewETHKeyManagerSigner(logger, db, *network, true, "") require.NoError(t, err) sk1 := &bls.SecretKey{} @@ -120,7 +126,7 @@ func TestEncryptedKeyManager(t *testing.T) { } func TestSlashing(t *testing.T) { - km := testKeyManager(t) + km := testKeyManager(t, nil) sk1 := &bls.SecretKey{} require.NoError(t, sk1.SetHexString(sk1Str)) @@ -129,12 +135,12 @@ func 
TestSlashing(t *testing.T) { currentSlot := km.(*ethKeyManagerSigner).storage.Network().EstimatedCurrentSlot() currentEpoch := km.(*ethKeyManagerSigner).storage.Network().EstimatedEpochAtSlot(currentSlot) - highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance + 1 + highestTarget := currentEpoch + minSPAttestationEpochGap + 1 highestSource := highestTarget - 1 - highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance + 1 + highestProposal := currentSlot + minSPProposalSlotGap + 1 attestationData := &phase0.AttestationData{ - Slot: 30, + Slot: currentSlot, Index: 1, BeaconBlockRoot: [32]byte{1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2}, Source: &phase0.Checkpoint{ @@ -272,7 +278,7 @@ func TestSlashing(t *testing.T) { } func TestSlashing_Attestation(t *testing.T) { - km := testKeyManager(t) + km := testKeyManager(t, nil) var secretKeys [4]*bls.SecretKey for i := range secretKeys { @@ -280,8 +286,7 @@ func TestSlashing_Attestation(t *testing.T) { secretKeys[i].SetByCSPRNG() // Equivalent to AddShare but with a custom slot for minimal slashing protection. - minimalSlot := phase0.Slot(64) - err := km.(*ethKeyManagerSigner).saveMinimalSlashingProtection(secretKeys[i].GetPublicKey().Serialize(), minimalSlot) + err := km.(*ethKeyManagerSigner).BumpSlashingProtection(secretKeys[i].GetPublicKey().Serialize()) require.NoError(t, err) err = km.(*ethKeyManagerSigner).saveShare(secretKeys[i]) require.NoError(t, err) @@ -317,6 +322,12 @@ func TestSlashing_Attestation(t *testing.T) { require.NoError(t, err, "expected no slashing") require.NotZero(t, sig, "expected non-zero signature") require.NotZero(t, root, "expected non-zero root") + + highAtt, found, err := km.(*ethKeyManagerSigner).storage.RetrieveHighestAttestation(sk.GetPublicKey().Serialize()) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, attestation.Source.Epoch, highAtt.Source.Epoch) + require.Equal(t, attestation.Target.Epoch, highAtt.Target.Epoch) } } @@ -360,7 +371,7 @@ func TestSlashing_Attestation(t *testing.T) { func TestSignRoot(t *testing.T) { require.NoError(t, bls.Init(bls.BLS12_381)) - km := testKeyManager(t) + km := testKeyManager(t, nil) t.Run("pk 1", func(t *testing.T) { pk := &bls.PublicKey{} diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index 15918a10e9..fa67b22222 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -40,10 +40,10 @@ import ( "github.com/bloxapp/ssv/operator/validator" "github.com/bloxapp/ssv/operator/validator/mocks" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" - mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" registrystorage "github.com/bloxapp/ssv/registry/storage" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils" "github.com/bloxapp/ssv/utils/blskeygen" "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/utils/threshold" @@ -65,7 +65,13 @@ func TestHandleBlockEventsStream(t *testing.T) { ops, err := createOperators(4) require.NoError(t, err) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + currentSlot := &utils.SlotValue{} + mockBeaconNetwork := utils.SetupMockBeaconNetwork(t, currentSlot) + mockNetworkConfig := &networkconfig.NetworkConfig{ + Beacon: mockBeaconNetwork, + } + + eh, _, err := setupEventHandler(t, ctx, logger, mockNetworkConfig, ops[0], false) if err != nil { 
t.Fatal(err) } @@ -115,7 +121,18 @@ func TestHandleBlockEventsStream(t *testing.T) { sharesData1, err := generateSharesData(validatorData1, ops, testAddr, 0) require.NoError(t, err) + validatorData2, err := createNewValidator(ops) + require.NoError(t, err) + sharesData2, err := generateSharesData(validatorData2, ops, testAddr, 2) + require.NoError(t, err) + + validatorData3, err := createNewValidator(ops) + require.NoError(t, err) + sharesData3, err := generateSharesData(validatorData3, ops, testAddr, 3) + require.NoError(t, err) + blockNum := uint64(0x1) + currentSlot.SetSlot(100) t.Run("test OperatorAdded event handle", func(t *testing.T) { @@ -204,6 +221,7 @@ func TestHandleBlockEventsStream(t *testing.T) { // Receive event, unmarshall, parse, check parse event is not nil or with an error, // public key is correct, owner is correct, operator ids are correct, shares are correct + // slashing protection data is correct t.Run("test ValidatorAdded event handle", func(t *testing.T) { nonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) require.NoError(t, err) @@ -251,12 +269,8 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) require.Equal(t, registrystorage.Nonce(1), nonce) - validatorData2, err := createNewValidator(ops) - require.NoError(t, err) - sharesData2, err := generateSharesData(validatorData2, ops, testAddr, 2) - require.NoError(t, err) - // SharesData length is incorrect. Nonce is bumped; Validator wasn't added + // slashing protection data is not added t.Run("test nonce bumping even for incorrect sharesData length", func(t *testing.T) { // changing the length malformedSharesData := sharesData2[:len(sharesData2)-1] @@ -305,6 +319,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Length of the shares []byte is correct; nonce is bumped; validator is added + // slashing protection data is correct t.Run("test validator 1 doesnt check validator's 4 share", func(t *testing.T) { malformedSharesData := sharesData2[:] // Corrupt the encrypted last share key of the 4th operator @@ -353,12 +368,8 @@ func TestHandleBlockEventsStream(t *testing.T) { require.Equal(t, registrystorage.Nonce(3), nonce) }) - validatorData3, err := createNewValidator(ops) - require.NoError(t, err) - sharesData3, err := generateSharesData(validatorData3, ops, testAddr, 3) - require.NoError(t, err) - // Share for 1st operator is malformed; check nonce is bumped correctly; validator wasn't added + // slashing protection data is not added t.Run("test malformed ValidatorAdded and nonce is bumped", func(t *testing.T) { malformedSharesData := sharesData3[:] @@ -413,6 +424,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Correct event; check nonce is bumped correctly; validator is added + // slashing protection data is correct t.Run("test correct ValidatorAdded again and nonce is bumped", func(t *testing.T) { // regenerate with updated nonce sharesData3, err = generateSharesData(validatorData3, ops, testAddr, 4) @@ -463,6 +475,7 @@ func TestHandleBlockEventsStream(t *testing.T) { // Receive event, unmarshall, parse, check parse event is not nil or with an error, // public key is correct, owner is correct, operator ids are correct + // slashing protection data is removed t.Run("test ValidatorRemoved event handle", func(t *testing.T) { _, err = boundContract.SimcontractTransactor.RemoveValidator( auth, @@ -499,6 +512,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, 
operator ids are correct
+ // slashing protection data is not deleted
 t.Run("test ClusterLiquidated event handle", func(t *testing.T) {
 _, err = boundContract.SimcontractTransactor.Liquidate(
 auth,
@@ -528,13 +542,114 @@ func TestHandleBlockEventsStream(t *testing.T) {
 require.Equal(t, blockNum+1, lastProcessedBlock)
 require.NoError(t, err)
 blockNum++
+
+ // check that slashing data was not deleted
+ sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize()
+ highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey)
+ require.NoError(t, err)
+ require.True(t, found)
+ require.NotNil(t, highestAttestation)
+
+ require.Equal(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1)
+ require.Equal(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot()))
+
+ highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey)
+ require.NoError(t, err)
+ require.True(t, found)
+ require.Equal(t, highestProposal, currentSlot.GetSlot())
 })

 // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct
+ // ** storedEpoch = max(nextEpoch, storedEpoch) **
+ // Validate that the stored slashing protection epoch is nextEpoch and NOT storedEpoch
 t.Run("test ClusterReactivated event handle", func(t *testing.T) {
 _, err = boundContract.SimcontractTransactor.Reactivate(
 auth,
- []uint64{1, 2, 3},
+ []uint64{1, 2, 3, 4},
+ big.NewInt(100_000_000),
+ simcontract.CallableCluster{
+ ValidatorCount: 1,
+ NetworkFeeIndex: 1,
+ Index: 1,
+ Active: true,
+ Balance: big.NewInt(100_000_000),
+ })
+ require.NoError(t, err)
+ sim.Commit()
+
+ block := <-logs
+ require.NotEmpty(t, block.Logs)
+ require.Equal(t, ethcommon.HexToHash("0xc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b6859"), block.Logs[0].Topics[0])
+
+ eventsCh := make(chan executionclient.BlockLogs)
+ go func() {
+ defer close(eventsCh)
+ eventsCh <- block
+ }()
+
+ currentSlot.SetSlot(1000)
+
+ lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
+ require.Equal(t, blockNum+1, lastProcessedBlock)
+ require.NoError(t, err)
+
+ // check that slashing data was bumped
+ sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize()
+ highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey)
+ require.NoError(t, err)
+ require.True(t, found)
+ require.NotNil(t, highestAttestation)
+ require.Equal(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1)
+ require.Equal(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot()))
+
+ highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey)
+ require.NoError(t, err)
+ require.True(t, found)
+ require.Equal(t, highestProposal, currentSlot.GetSlot())
+
+ blockNum++
+ })
+
+ // The Liquidated event is far in the future,
+ // to simulate slashing protection data stored far in the future
+ t.Run("test ClusterLiquidated event handle - far in the future", func(t *testing.T) {
+ _, err = boundContract.SimcontractTransactor.Liquidate(
+ auth,
+ testAddr,
+ []uint64{1, 2, 3, 4},
+ simcontract.CallableCluster{
+ ValidatorCount: 1,
+ NetworkFeeIndex: 1,
+ Index: 1,
+ Active: true,
+ Balance: big.NewInt(100_000_000),
+ })
+
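		// These "far in the future" sub-tests pin down the bump rule quoted in
		// the comments above, storedEpoch = max(nextEpoch, storedEpoch). A
		// minimal illustration (bumpEpoch is a hypothetical helper, not the
		// patch's API; the real logic sits behind the key manager's
		// BumpSlashingProtection):
		//
		//	bumpEpoch := func(storedEpoch, nextEpoch phase0.Epoch) phase0.Epoch {
		//		if storedEpoch > nextEpoch {
		//			return storedEpoch // never move slashing protection data backwards
		//		}
		//		return nextEpoch
		//	}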
require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e688"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + }) + + // Reactivate event + // ** storedEpoch = max(nextEpoch, storedEpoch) ** + // Validate that slashing protection data stored epoch is storedEpoch and NOT nextEpoch + t.Run("test ClusterReactivated event handle - far in the future", func(t *testing.T) { + _, err = boundContract.SimcontractTransactor.Reactivate( + auth, + []uint64{1, 2, 3, 4}, big.NewInt(100_000_000), simcontract.CallableCluster{ ValidatorCount: 1, @@ -556,9 +671,26 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() + currentSlot.SetSlot(100) + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + + // check that slashing data is greater than current epoch + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + require.Greater(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Greater(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Greater(t, highestProposal, currentSlot.GetSlot()) + blockNum++ }) @@ -592,7 +724,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) } -func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, operator *testOperator, useMockCtrl bool) (*EventHandler, *mocks.MockController, error) { +func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, network *networkconfig.NetworkConfig, operator *testOperator, useMockCtrl bool) (*EventHandler, *mocks.MockController, error) { db, err := kv.NewInMemory(logger, basedb.Options{ Ctx: ctx, }) @@ -601,11 +733,13 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op storageMap := ibftstorage.NewStores() nodeStorage, operatorData := setupOperatorStorage(logger, db, operator) - mockNetworkConfig := networkconfig.NetworkConfig{ - Beacon: setupMockBeaconNetwork(t), + if network == nil { + network = &networkconfig.NetworkConfig{ + Beacon: utils.SetupMockBeaconNetwork(t, &utils.SlotValue{}), + } } - keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, mockNetworkConfig, true, "") + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, *network, true, "") if err != nil { return nil, nil, err } @@ -626,7 +760,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - mockNetworkConfig.Domain, + network.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -663,7 +797,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - 
mockNetworkConfig.Domain, + network.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -677,20 +811,6 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op return eh, nil, nil } -func setupMockBeaconNetwork(t *testing.T) *mocknetwork.MockBeaconNetwork { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockBeaconNetwork := mocknetwork.NewMockBeaconNetwork(ctrl) - - currentSlot := phase0.Slot(100) - mockBeaconNetwork.EXPECT().GetBeaconNetwork().Return(networkconfig.TestNetwork.Beacon.GetBeaconNetwork()).AnyTimes() - mockBeaconNetwork.EXPECT().EstimatedCurrentSlot().Return(currentSlot).AnyTimes() - mockBeaconNetwork.EXPECT().EstimatedEpochAtSlot(gomock.Any()).Return(phase0.Epoch(currentSlot / 32)).AnyTimes() - - return mockBeaconNetwork -} - func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *testOperator) (operatorstorage.Storage, *registrystorage.OperatorData) { if operator == nil { logger.Fatal("empty test operator was passed", zap.Error(fmt.Errorf("empty test operator was passed"))) diff --git a/eth/eventhandler/handlers.go b/eth/eventhandler/handlers.go index 7bc6e4dade..d4632ddf6f 100644 --- a/eth/eventhandler/handlers.go +++ b/eth/eventhandler/handlers.go @@ -12,6 +12,7 @@ import ( "github.com/herumi/bls-eth-go-binary/bls" "go.uber.org/zap" + "github.com/bloxapp/ssv/ekm" "github.com/bloxapp/ssv/eth/contract" "github.com/bloxapp/ssv/logging/fields" qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" @@ -422,6 +423,13 @@ func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract return nil, fmt.Errorf("could not process cluster event: %w", err) } + // bump slashing protection for operator reactivated validators + for _, share := range toReactivate { + if err := eh.keyManager.(ekm.StorageProvider).BumpSlashingProtection(share.SharePubKey); err != nil { + return nil, fmt.Errorf("could not bump slashing protection: %w", err) + } + } + if len(enabledPubKeys) > 0 { logger = logger.With(zap.Strings("enabled_validators", enabledPubKeys)) } diff --git a/eth/eventhandler/local_events_test.go b/eth/eventhandler/local_events_test.go index 7697c79363..a61c152c37 100644 --- a/eth/eventhandler/local_events_test.go +++ b/eth/eventhandler/local_events_test.go @@ -46,7 +46,7 @@ func TestHandleLocalEvent(t *testing.T) { defer cancel() logger := zaptest.NewLogger(t) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } @@ -73,7 +73,7 @@ func TestHandleLocalEvent(t *testing.T) { defer cancel() logger := zaptest.NewLogger(t) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } diff --git a/eth/eventhandler/task_executor_test.go b/eth/eventhandler/task_executor_test.go index e692981f42..ecf69530e3 100644 --- a/eth/eventhandler/task_executor_test.go +++ b/eth/eventhandler/task_executor_test.go @@ -52,7 +52,7 @@ func TestExecuteTask(t *testing.T) { ops, err := createOperators(1) require.NoError(t, err) - eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, ops[0], true) + eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, nil, ops[0], true) require.NoError(t, err) t.Run("test AddValidator task execution - not started", func(t *testing.T) { @@ -149,7 +149,7 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) { ops, err := createOperators(1) 
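	// The handleClusterReactivated hunk above type-asserts the key manager to
	// ekm.StorageProvider inline before bumping slashing protection. A more
	// defensive sketch of the same step (a variant for illustration, not the
	// patch's code, assuming the same interfaces):
	//
	//	sp, ok := eh.keyManager.(ekm.StorageProvider)
	//	if !ok {
	//		return nil, fmt.Errorf("key manager does not expose slashing protection storage")
	//	}
	//	for _, share := range toReactivate {
	//		if err := sp.BumpSlashingProtection(share.SharePubKey); err != nil {
	//			return nil, fmt.Errorf("could not bump slashing protection: %w", err)
	//		}
	//	}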
require.NoError(t, err) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } diff --git a/utils/testutils.go b/utils/testutils.go new file mode 100644 index 0000000000..bfd9290b25 --- /dev/null +++ b/utils/testutils.go @@ -0,0 +1,55 @@ +package utils + +import ( + "sync" + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/golang/mock/gomock" + + "github.com/bloxapp/ssv/networkconfig" + mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" +) + +type SlotValue struct { + mu sync.Mutex + slot phase0.Slot +} + +func (sv *SlotValue) SetSlot(s phase0.Slot) { + sv.mu.Lock() + defer sv.mu.Unlock() + sv.slot = s +} + +func (sv *SlotValue) GetSlot() phase0.Slot { + sv.mu.Lock() + defer sv.mu.Unlock() + return sv.slot +} + +func SetupMockBeaconNetwork(t *testing.T, currentSlot *SlotValue) *mocknetwork.MockBeaconNetwork { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + if currentSlot == nil { + currentSlot = &SlotValue{} + currentSlot.SetSlot(32) + } + + mockBeaconNetwork := mocknetwork.NewMockBeaconNetwork(ctrl) + mockBeaconNetwork.EXPECT().GetBeaconNetwork().Return(networkconfig.TestNetwork.Beacon.GetBeaconNetwork()).AnyTimes() + + mockBeaconNetwork.EXPECT().EstimatedCurrentSlot().DoAndReturn( + func() phase0.Slot { + return currentSlot.GetSlot() + }, + ).AnyTimes() + mockBeaconNetwork.EXPECT().EstimatedEpochAtSlot(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) phase0.Epoch { + return phase0.Epoch(slot / 32) + }, + ).AnyTimes() + + return mockBeaconNetwork +} From 131a2f2874d1245d086f0baa74e1b74bb616eb25 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Wed, 20 Sep 2023 14:58:40 +0300 Subject: [PATCH 12/54] eth1 Event handler tests improvements (#1125) * improved ValidatorRemoved tests. 
Fixed sol contract * improved OperatorRemoved test * improved ClusterLiquidated & ClusterReactivated tests * minor fixes + do / undo tests * minor fixes + do / undo tests (#1132) * tests: added ValidatorRegistered test case with different owner --- eth/eventhandler/event_handler_test.go | 595 +++++++++++++++++++--- eth/eventhandler/local_events_test.go | 2 +- eth/eventhandler/task_executor_test.go | 4 +- eth/simulator/simcontract/simcontract.go | 2 +- eth/simulator/simcontract/simcontract.sol | 33 +- 5 files changed, 551 insertions(+), 85 deletions(-) diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index fa67b22222..ec33d5ed50 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -61,9 +61,11 @@ func TestHandleBlockEventsStream(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + operatorsCount := uint64(0) // Create operators rsa keys - ops, err := createOperators(4) + ops, err := createOperators(4, operatorsCount) require.NoError(t, err) + operatorsCount += uint64(len(ops)) currentSlot := &utils.SlotValue{} mockBeaconNetwork := utils.SetupMockBeaconNetwork(t, currentSlot) @@ -75,7 +77,18 @@ func TestHandleBlockEventsStream(t *testing.T) { if err != nil { t.Fatal(err) } - sim := simTestBackend(testAddr) + + // Just creating one more key -> address for testing + wrongPk, err := crypto.HexToECDSA("42e14d227125f411d6d3285bb4a2e07c2dba2e210bd2f3f4e2a36633bd61bfe6") + require.NoError(t, err) + testAddr2 := crypto.PubkeyToAddress(wrongPk.PublicKey) + + testAddresses := make([]*ethcommon.Address, 2) + testAddresses[0] = &testAddr + testAddresses[1] = &testAddr2 + + // Adding testAddresses to the genesis block mostly to specify some balances for them + sim := simTestBackend(testAddresses) // Create JSON-RPC handler rpcServer, _ := sim.Node.RPCHandler() @@ -121,10 +134,9 @@ func TestHandleBlockEventsStream(t *testing.T) { sharesData1, err := generateSharesData(validatorData1, ops, testAddr, 0) require.NoError(t, err) + // Create another validator. 
We'll create the shares later in the tests validatorData2, err := createNewValidator(ops) require.NoError(t, err) - sharesData2, err := generateSharesData(validatorData2, ops, testAddr, 2) - require.NoError(t, err) validatorData3, err := createNewValidator(ops) require.NoError(t, err) @@ -161,18 +173,18 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, len(operators)) - // Hanlde the event + // Handle the event lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) blockNum++ - // Check storage for a new operator + // Check storage for the new operators operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) require.NoError(t, err) require.Equal(t, len(ops), len(operators)) - // Check if an operator in the storage has same attributes + // Check if operators in the storage have same attributes for i, log := range block.Logs { operatorAddedEvent, err := contractFilterer.ParseOperatorAdded(log) require.NoError(t, err) @@ -184,39 +196,115 @@ func TestHandleBlockEventsStream(t *testing.T) { } }) - // Receive event, unmarshall, parse, check parse event is not nil or with error, operator id is correct t.Run("test OperatorRemoved event handle", func(t *testing.T) { - // Call the contract method - _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 1) - require.NoError(t, err) - sim.Commit() - block := <-logs - require.NotEmpty(t, block.Logs) - require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0]) + // Should return MalformedEventError and no changes to the state + t.Run("test OperatorRemoved incorrect operator ID", func(t *testing.T) { + // Call the contract method + _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 100500) + require.NoError(t, err) + sim.Commit() - eventsCh := make(chan executionclient.BlockLogs) - go func() { - defer close(eventsCh) - eventsCh <- block - }() + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0]) - // Check that there is 1 registered operator - operators, err := eh.nodeStorage.ListOperators(nil, 0, 10) - require.NoError(t, err) - require.Equal(t, len(ops), len(operators)) + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() - // Hanlde the event - lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) - require.NoError(t, err) - blockNum++ + // Check that there is 1 registered operator + operators, err := eh.nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) - // Check if the operator was removed successfuly - // TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved - operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) - require.NoError(t, err) - require.Equal(t, len(ops), len(operators)) + // Handle the event + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check if the operator wasn't removed successfully + operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + }) + + // Receive 
event, unmarshal, parse, check parse event is not nil or with error, operator id is correct
+ // TODO: fix this test. It checks nothing, because the handleOperatorRemoved method is currently a no-op
+ t.Run("test OperatorRemoved happy flow", func(t *testing.T) {
+ // Prepare a new operator to remove it later in this test
+ op, err := createOperators(1, operatorsCount)
+ require.NoError(t, err)
+ operatorsCount++
+
+ // Call the contract method
+ packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op[0].pub)
+ require.NoError(t, err)
+ _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000))
+ require.NoError(t, err)
+
+ sim.Commit()
+
+ block := <-logs
+ require.NotEmpty(t, block.Logs)
+ require.Equal(t, ethcommon.HexToHash("0xd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f4"), block.Logs[0].Topics[0])
+
+ eventsCh := make(chan executionclient.BlockLogs)
+ go func() {
+ defer close(eventsCh)
+ eventsCh <- block
+ }()
+
+ // Check that the number of registered operators hasn't changed yet
+ operators, err := eh.nodeStorage.ListOperators(nil, 0, 10)
+ require.NoError(t, err)
+ require.Equal(t, len(ops), len(operators))
+
+ // Handle OperatorAdded event
+ lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
+ require.Equal(t, blockNum+1, lastProcessedBlock)
+ require.NoError(t, err)
+ blockNum++
+ // Check storage for the new operator
+ operators, err = eh.nodeStorage.ListOperators(nil, 0, 10)
+ require.NoError(t, err)
+ require.Equal(t, len(ops)+1, len(operators))
+
+ // Now start the OperatorRemoved event handling
+ // Call the contract method
+ _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 4)
+ require.NoError(t, err)
+ sim.Commit()
+
+ block = <-logs
+ require.NotEmpty(t, block.Logs)
+ require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0])
+
+ eventsCh = make(chan executionclient.BlockLogs)
+ go func() {
+ defer close(eventsCh)
+ eventsCh <- block
+ }()
+
+ operators, err = eh.nodeStorage.ListOperators(nil, 0, 10)
+ require.NoError(t, err)
+ require.Equal(t, len(ops)+1, len(operators))
+
+ // Handle OperatorRemoved event
+ lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false)
+ require.Equal(t, blockNum+1, lastProcessedBlock)
+ require.NoError(t, err)
+ blockNum++
+
+ // TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved
+ // Check if the operator was removed successfully
+ //operators, err = eh.nodeStorage.ListOperators(nil, 0, 10)
+ //require.NoError(t, err)
+ //require.Equal(t, len(ops), len(operators))
+ })
 })

 // Receive event, unmarshall, parse, check parse event is not nil or with an error,
@@ -269,6 +357,9 @@ func TestHandleBlockEventsStream(t *testing.T) {
 require.NoError(t, err)
 require.Equal(t, registrystorage.Nonce(1), nonce)

+ sharesData2, err := generateSharesData(validatorData2, ops, testAddr, 2)
+ require.NoError(t, err)
+
 // SharesData length is incorrect.
Nonce is bumped; Validator wasn't added // slashing protection data is not added t.Run("test nonce bumping even for incorrect sharesData length", func(t *testing.T) { @@ -471,44 +562,184 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) require.Equal(t, registrystorage.Nonce(5), nonce) }) + + t.Run("test correct ValidatorAdded again and nonce is bumped with another owner", func(t *testing.T) { + validatorData4, err := createNewValidator(ops) + require.NoError(t, err) + authTestAddr2, _ := bind.NewKeyedTransactorWithChainID(wrongPk, big.NewInt(1337)) + + sharesData4, err := generateSharesData(validatorData4, ops, testAddr2, 0) + require.NoError(t, err) + // Call the contract method + _, err = boundContract.SimcontractTransactor.RegisterValidator( + authTestAddr2, + validatorData4.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + sharesData4, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block = <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e5"), block.Logs[0].Topics[0]) + + eventsCh = make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) + require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) + blockNum++ + + requireKeyManagerDataToExist(t, eh, 4, validatorData4) + + // Check that validator was registered + shares = eh.nodeStorage.Shares().List(nil) + require.Equal(t, 4, len(shares)) + // and nonce was bumped + nonce, err = eh.nodeStorage.GetNextNonce(nil, testAddr2) + require.NoError(t, err) + // Check that nonces are not intertwined between different owner accounts! 
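		// Owner-scoped nonces, sketched (the map is an illustration, not the
		// registry storage implementation): testAddr was bumped to 5 by the
		// earlier registrations, while testAddr2 starts fresh and lands on 1:
		//
		//	nonces := map[ethcommon.Address]registrystorage.Nonce{}
		//	nonces[testAddr] = 5 // five ValidatorAdded events processed for testAddr
		//	nonces[testAddr2]++  // first ValidatorAdded event for testAddr2
		//	// -> GetNextNonce(nil, testAddr2) == registrystorage.Nonce(1)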
+ require.Equal(t, registrystorage.Nonce(1), nonce) + }) + }) - // Receive event, unmarshall, parse, check parse event is not nil or with an error, - // public key is correct, owner is correct, operator ids are correct - // slashing protection data is removed - t.Run("test ValidatorRemoved event handle", func(t *testing.T) { - _, err = boundContract.SimcontractTransactor.RemoveValidator( - auth, - validatorData1.masterPubKey.Serialize(), - []uint64{1, 2, 3, 4}, - simcontract.CallableCluster{ - ValidatorCount: 1, - NetworkFeeIndex: 1, - Index: 1, - Active: true, - Balance: big.NewInt(100_000_000), - }) - require.NoError(t, err) - sim.Commit() + t.Run("test ValidatorRemoved event handling", func(t *testing.T) { + // Must throw error "malformed event: could not find validator share" + t.Run("ValidatorRemoved incorrect event public key", func(t *testing.T) { + pk := validatorData1.masterPubKey.Serialize() + // Corrupt the public key + pk[len(pk)-1] ^= 1 - block := <-logs - require.NotEmpty(t, block.Logs) - require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + pk, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() - eventsCh := make(chan executionclient.BlockLogs) - go func() { - defer close(eventsCh) - eventsCh <- block - }() + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) - requireKeyManagerDataToExist(t, eh, 3, validatorData1) + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() - lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, true) - require.NoError(t, err) - require.Equal(t, blockNum+1, lastProcessedBlock) - blockNum++ + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ - requireKeyManagerDataToNotExist(t, eh, 2, validatorData1) + // Check the validator's shares are still present in the state after incorrect ValidatorRemoved event + valShare := eh.nodeStorage.Shares().Get(nil, validatorData1.masterPubKey.Serialize()) + require.NotNil(t, valShare) + }) + + t.Run("ValidatorRemoved incorrect owner address", func(t *testing.T) { + wrongAuth, _ := bind.NewKeyedTransactorWithChainID(wrongPk, big.NewInt(1337)) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + wrongAuth, + validatorData1.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator's shares are still present in the state after 
incorrect ValidatorRemoved event + valShare := eh.nodeStorage.Shares().Get(nil, validatorData1.masterPubKey.Serialize()) + require.NotNil(t, valShare) + }) + + // Receive event, unmarshall, parse, check parse event is not nil or with an error, + // public key is correct, owner is correct, operator ids are correct + // event handler's own operator is responsible for removed validator + t.Run("ValidatorRemoved happy flow", func(t *testing.T) { + valPubKey := validatorData1.masterPubKey.Serialize() + // Check the validator's shares are present in the state before removing + valShare := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, valShare) + requireKeyManagerDataToExist(t, eh, 4, validatorData1) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + validatorData1.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator was removed from the validator shares storage. + shares := eh.nodeStorage.Shares().List(nil) + require.Equal(t, 3, len(shares)) + valShare = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + requireKeyManagerDataToNotExist(t, eh, 3, validatorData1) + }) }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct @@ -538,11 +769,21 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests. 
This one has to be in the state
+ valPubKey := validatorData2.masterPubKey.Serialize()
+
+ share := eh.nodeStorage.Shares().Get(nil, valPubKey)
+ require.NotNil(t, share)
+ require.False(t, share.Liquidated)
+
 lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
 require.Equal(t, blockNum+1, lastProcessedBlock)
 require.NoError(t, err)
 blockNum++
+
+ share = eh.nodeStorage.Shares().Get(nil, valPubKey)
+ require.NotNil(t, share)
+ require.True(t, share.Liquidated)
 // check that slashing data was not deleted
 sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize()
 highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey)
@@ -671,6 +912,12 @@ func TestHandleBlockEventsStream(t *testing.T) {
 eventsCh <- block
 }()

+ // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests
+ valPubKey := validatorData2.masterPubKey.Serialize()
+
+ share := eh.nodeStorage.Shares().Get(nil, valPubKey)
+ require.NotNil(t, share)
+ require.True(t, share.Liquidated)
 currentSlot.SetSlot(100)

 lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
@@ -692,13 +939,17 @@ func TestHandleBlockEventsStream(t *testing.T) {
 require.Greater(t, highestProposal, currentSlot.GetSlot())

 blockNum++
+
+ share = eh.nodeStorage.Shares().Get(nil, valPubKey)
+ require.NotNil(t, share)
+ require.False(t, share.Liquidated)
 })

 // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, fee recipient is correct
 t.Run("test FeeRecipientAddressUpdated event handle", func(t *testing.T) {
 _, err = boundContract.SimcontractTransactor.SetFeeRecipientAddress(
 auth,
- ethcommon.HexToAddress("0x1"),
+ testAddr2,
 )
 require.NoError(t, err)
 sim.Commit()
@@ -717,10 +968,198 @@ func TestHandleBlockEventsStream(t *testing.T) {
 require.Equal(t, blockNum+1, lastProcessedBlock)
 require.NoError(t, err)
 blockNum++
- // Check if the fee recepient was updated
- recepientData, _, err := eh.nodeStorage.GetRecipientData(nil, testAddr)
+ // Check if the fee recipient was updated
+ recipientData, _, err := eh.nodeStorage.GetRecipientData(nil, testAddr)
 require.NoError(t, err)
- require.Equal(t, ethcommon.HexToAddress("0x1").String(), recepientData.FeeRecipient.String())
+ require.Equal(t, testAddr2.String(), recipientData.FeeRecipient.String())
+ })
+
+ // DO / UNDO in one block tests
+ t.Run("test DO / UNDO in one block", func(t *testing.T) {
+ t.Run("test OperatorAdded + OperatorRemoved events handling", func(t *testing.T) {
+ // There are 5 operators registered before this test runs;
+ // check the current number of registered operators
+ operators, err := eh.nodeStorage.ListOperators(nil, 0, 10)
+ require.NoError(t, err)
+ require.Equal(t, operatorsCount, uint64(len(operators)))
+
+ tmpOps, err := createOperators(1, operatorsCount)
+ require.NoError(t, err)
+ operatorsCount++
+ op := tmpOps[0]
+
+ // Call the RegisterOperator contract method
+ packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.pub)
+ require.NoError(t, err)
+ _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000))
+ require.NoError(t, err)
+
+ // Call the OperatorRemoved contract method
+ _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, op.id)
+ require.NoError(t, err)
+
+ sim.Commit()
+
+ block := <-logs
+ require.NotEmpty(t, block.Logs)
+ require.Equal(t, ethcommon.HexToHash("0xd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f4"),
block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + // Handle the event + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // #TODO: Fails until we fix the OperatorAdded: handlers.go #108 + // Check storage for the new operators + //operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + //require.NoError(t, err) + //require.Equal(t, operatorsCount-1, uint64(len(operators))) + // + //_, found, err := eh.nodeStorage.GetOperatorData(nil, op.id) + //require.NoError(t, err) + //require.False(t, found) + }) + + t.Run("test ValidatorAdded + ValidatorRemoved events handling", func(t *testing.T) { + shares := eh.nodeStorage.Shares().List(nil) + sharesCountBeforeTest := len(shares) + + validatorData4, err := createNewValidator(ops) + require.NoError(t, err) + + currentNonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) + require.NoError(t, err) + + sharesData4, err := generateSharesData(validatorData4, ops, testAddr, int(currentNonce)) + require.NoError(t, err) + + valPubKey := validatorData4.masterPubKey.Serialize() + valShare := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + + // Call the contract method + _, err = boundContract.SimcontractTransactor.RegisterValidator( + auth, + validatorData4.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + sharesData4, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + valPubKey, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e5"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + valShare = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + + // Check that validator was registered + shares = eh.nodeStorage.Shares().List(nil) + require.Equal(t, sharesCountBeforeTest, len(shares)) + // and nonce was bumped + nonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) + require.NoError(t, err) + require.Equal(t, currentNonce+1, nonce) + }) + + t.Run("test ClusterLiquidated + ClusterReactivated events handling", func(t *testing.T) { + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests + valPubKey := validatorData2.masterPubKey.Serialize() + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + + require.NotNil(t, share) + require.False(t, share.Liquidated) + _, err = boundContract.SimcontractTransactor.Liquidate( + 
auth, + testAddr, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + _, err = boundContract.SimcontractTransactor.Reactivate( + auth, + []uint64{1, 2, 3, 4}, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e688"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0xc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b6859"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) + }) }) } @@ -857,11 +1296,15 @@ func unmarshalLog(t *testing.T, rawOperatorAdded string) ethtypes.Log { return vLogOperatorAdded } -func simTestBackend(testAddr ethcommon.Address) *simulator.SimulatedBackend { +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { + genesis := core.GenesisAlloc{} + + for _, testAddr := range testAddresses { + genesis[*testAddr] = core.GenesisAccount{Balance: big.NewInt(10000000000000000)} + } + return simulator.NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000000000)}, - }, 10000000, + genesis, 10000000, ) } @@ -870,7 +1313,7 @@ func TestCreatingSharesData(t *testing.T) { owner := testAddr nonce := 0 // - ops, err := createOperators(4) + ops, err := createOperators(4, 1) require.NoError(t, err) validatorData, err := createNewValidator(ops) @@ -968,7 +1411,7 @@ func createNewValidator(ops []*testOperator) (*testValidatorData, error) { return validatorData, nil } -func createOperators(num uint64) ([]*testOperator, error) { +func createOperators(num uint64, idOffset uint64) ([]*testOperator, error) { testops := make([]*testOperator, num) for i := uint64(1); i <= num; i++ { @@ -977,7 +1420,7 @@ func createOperators(num uint64) ([]*testOperator, error) { return nil, err } testops[i-1] = &testOperator{ - id: i, + id: idOffset + i, pub: pb, priv: sk, } diff --git a/eth/eventhandler/local_events_test.go b/eth/eventhandler/local_events_test.go index a61c152c37..fda1ae0080 100644 --- a/eth/eventhandler/local_events_test.go +++ b/eth/eventhandler/local_events_test.go @@ -18,7 +18,7 @@ import ( func TestHandleLocalEvent(t *testing.T) { // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) t.Run("correct OperatorAdded event", func(t *testing.T) { diff --git a/eth/eventhandler/task_executor_test.go b/eth/eventhandler/task_executor_test.go index ecf69530e3..49b34f5ccf 100644 --- a/eth/eventhandler/task_executor_test.go +++ b/eth/eventhandler/task_executor_test.go @@ -49,7 +49,7 @@ func TestExecuteTask(t *testing.T) { defer cancel() // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, nil, 
ops[0], true) @@ -146,7 +146,7 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) { defer cancel() // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) diff --git a/eth/simulator/simcontract/simcontract.go b/eth/simulator/simcontract/simcontract.go index 9da8921e7a..2877c65b29 100644 --- a/eth/simulator/simcontract/simcontract.go +++ b/eth/simulator/simcontract/simcontract.go @@ -41,7 +41,7 @@ type CallableCluster struct { // SimcontractMetaData contains all meta data concerning the Simcontract contract. var SimcontractMetaData = &bind.MetaData{ ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ClusterLiquidated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ClusterReactivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"recipientAddress\",\"type\":\"address\"}],\"name\":\"FeeRecipientAddressUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"OperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"}],\"name\":\"OperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"shares\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint32\",\"n
ame\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ValidatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ValidatorRemoved\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"clusterOwner\",\"type\":\"address\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"liquidate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"reactivate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"registerOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"internalType\":\"bytes\",\"name\":\"sharesData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"ui
nt64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"registerValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"}],\"name\":\"removeOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"removeValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipientAddress\",\"type\":\"address\"}],\"name\":\"setFeeRecipientAddress\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: "0x608060405260008060006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555034801561003957600080fd5b50610f40806100496000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80635fec6dd01161005b5780635fec6dd0146100d6578063bf0f2fb2146100f2578063dbcdc2cc1461010e578063ff212c5c1461012a5761007d565b806306e8fb9c1461008257806312b3fc191461009e5780632e168e0e146100ba575b600080fd5b61009c60048036038101906100979190610740565b610146565b005b6100b860048036038101906100b3919061086f565b6101a7565b005b6100d460048036038101906100cf9190610904565b610204565b005b6100f060048036038101906100eb9190610931565b61023e565b005b61010c60048036038101906101079190610a03565b610296565b005b61012860048036038101906101239190610a72565b6102eb565b005b610144600480360381019061013f9190610a9f565b61033c565b005b3373ffffffffffffffffffffffffffffffffffffffff167f48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e586898988888760405161019696959493929190610c9f565b60405180910390a250505050505050565b3373ffffffffffffffffffffffffffffffffffffffff167fccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e84848888866040516101f5959493929190610d89565b60405180910390a25050505050565b8067ffffffffffffffff167f0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e60405160405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167fc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b685985858460405161028893929190610dd2565b60405180910390a250505050565b8273ffffffffffffffffffffffffffffffffffffffff167f1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e68883836040516102de929190610e04565b60405180910390a2505050565b3373ffffffffffffffffffffffffffffffffffffffff167f259235c230d57def1521657e7c7951d3b385e76193378bc87ef6b56bc2ec3548826040516103319190610e43565b60405180910390a250565b60016000808282829054906101000a900467ffffffffffffffff166103619190610e8d565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055503373ffffffffffffffffffffffffffffffffffffffff1660008054906101000a900467fffffffffffff
fff1667ffffffffffffffff167fd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f48585856040516103f093929190610ed8565b60405180910390a3505050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f84011261043657610435610411565b5b8235905067ffffffffffffffff81111561045357610452610416565b5b60208301915083600182028301111561046f5761046e61041b565b5b9250929050565b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6104bf82610476565b810181811067ffffffffffffffff821117156104de576104dd610487565b5b80604052505050565b60006104f16103fd565b90506104fd82826104b6565b919050565b600067ffffffffffffffff82111561051d5761051c610487565b5b602082029050602081019050919050565b600067ffffffffffffffff82169050919050565b61054b8161052e565b811461055657600080fd5b50565b60008135905061056881610542565b92915050565b600061058161057c84610502565b6104e7565b905080838252602082019050602084028301858111156105a4576105a361041b565b5b835b818110156105cd57806105b98882610559565b8452602084019350506020810190506105a6565b5050509392505050565b600082601f8301126105ec576105eb610411565b5b81356105fc84826020860161056e565b91505092915050565b6000819050919050565b61061881610605565b811461062357600080fd5b50565b6000813590506106358161060f565b92915050565b600080fd5b600063ffffffff82169050919050565b61065981610640565b811461066457600080fd5b50565b60008135905061067681610650565b92915050565b60008115159050919050565b6106918161067c565b811461069c57600080fd5b50565b6000813590506106ae81610688565b92915050565b600060a082840312156106ca576106c961063b565b5b6106d460a06104e7565b905060006106e484828501610667565b60008301525060206106f884828501610559565b602083015250604061070c84828501610559565b60408301525060606107208482850161069f565b606083015250608061073484828501610626565b60808301525092915050565b6000806000806000806000610120888a0312156107605761075f610407565b5b600088013567ffffffffffffffff81111561077e5761077d61040c565b5b61078a8a828b01610420565b9750975050602088013567ffffffffffffffff8111156107ad576107ac61040c565b5b6107b98a828b016105d7565b955050604088013567ffffffffffffffff8111156107da576107d961040c565b5b6107e68a828b01610420565b945094505060606107f98a828b01610626565b925050608061080a8a828b016106b4565b91505092959891949750929550565b60008083601f84011261082f5761082e610411565b5b8235905067ffffffffffffffff81111561084c5761084b610416565b5b6020830191508360208202830111156108685761086761041b565b5b9250929050565b600080600080600060e0868803121561088b5761088a610407565b5b600086013567ffffffffffffffff8111156108a9576108a861040c565b5b6108b588828901610420565b9550955050602086013567ffffffffffffffff8111156108d8576108d761040c565b5b6108e488828901610819565b935093505060406108f7888289016106b4565b9150509295509295909350565b60006020828403121561091a57610919610407565b5b600061092884828501610559565b91505092915050565b60008060008060e0858703121561094b5761094a610407565b5b600085013567ffffffffffffffff8111156109695761096861040c565b5b61097587828801610819565b9450945050602061098887828801610626565b9250506040610999878288016106b4565b91505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006109d0826109a5565b9050919050565b6109e0816109c5565b81146109eb57600080fd5b50565b6000813590506109fd816109d7565b92915050565b600080600060e08486031215610a1c57610a1b610407565b5b6000610a2a868287016109ee565b935050602084013567ffffffffffffffff811115610a4b57610a4a61040c565b5b610a57868287016105d7565b9250506040610a68868287016106b4565b9150509250925092565b600060208284031215610a8857610a87610407565b5b6000610a96848285016109ee565b91505092915050565b600080
600060408486031215610ab857610ab7610407565b5b600084013567ffffffffffffffff811115610ad657610ad561040c565b5b610ae286828701610420565b93509350506020610af586828701610626565b9150509250925092565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610b348161052e565b82525050565b6000610b468383610b2b565b60208301905092915050565b6000602082019050919050565b6000610b6a82610aff565b610b748185610b0a565b9350610b7f83610b1b565b8060005b83811015610bb0578151610b978882610b3a565b9750610ba283610b52565b925050600181019050610b83565b5085935050505092915050565b600082825260208201905092915050565b82818337600083830152505050565b6000610be98385610bbd565b9350610bf6838584610bce565b610bff83610476565b840190509392505050565b610c1381610640565b82525050565b610c228161067c565b82525050565b610c3181610605565b82525050565b60a082016000820151610c4d6000850182610c0a565b506020820151610c606020850182610b2b565b506040820151610c736040850182610b2b565b506060820151610c866060850182610c19565b506080820151610c996080850182610c28565b50505050565b6000610100820190508181036000830152610cba8189610b5f565b90508181036020830152610ccf818789610bdd565b90508181036040830152610ce4818587610bdd565b9050610cf36060830184610c37565b979650505050505050565b6000819050919050565b6000610d176020840184610559565b905092915050565b6000602082019050919050565b6000610d388385610b0a565b9350610d4382610cfe565b8060005b85811015610d7c57610d598284610d08565b610d638882610b3a565b9750610d6e83610d1f565b925050600181019050610d47565b5085925050509392505050565b600060e0820190508181036000830152610da4818789610d2c565b90508181036020830152610db9818587610bdd565b9050610dc86040830184610c37565b9695505050505050565b600060c0820190508181036000830152610ded818587610d2c565b9050610dfc6020830184610c37565b949350505050565b600060c0820190508181036000830152610e1e8185610b5f565b9050610e2d6020830184610c37565b9392505050565b610e3d816109c5565b82525050565b6000602082019050610e586000830184610e34565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610e988261052e565b9150610ea38361052e565b9250828201905067ffffffffffffffff811115610ec357610ec2610e5e565b5b92915050565b610ed281610605565b82525050565b60006040820190508181036000830152610ef3818587610bdd565b9050610f026020830184610ec9565b94935050505056fea2646970667358221220a849e84b21b5cf14144f9145592d2e879b8dfd174c980e9d839aabab095d209064736f6c63430008120033", + Bin: 
"0x608060405260008060006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555034801561003957600080fd5b50610f40806100496000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80635fec6dd01161005b5780635fec6dd0146100d6578063bf0f2fb2146100f2578063dbcdc2cc1461010e578063ff212c5c1461012a5761007d565b806306e8fb9c1461008257806312b3fc191461009e5780632e168e0e146100ba575b600080fd5b61009c60048036038101906100979190610740565b610146565b005b6100b860048036038101906100b3919061086f565b6101a7565b005b6100d460048036038101906100cf9190610904565b610204565b005b6100f060048036038101906100eb9190610931565b61023e565b005b61010c60048036038101906101079190610a03565b610296565b005b61012860048036038101906101239190610a72565b6102eb565b005b610144600480360381019061013f9190610a9f565b61033c565b005b3373ffffffffffffffffffffffffffffffffffffffff167f48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e586898988888760405161019696959493929190610c9f565b60405180910390a250505050505050565b3373ffffffffffffffffffffffffffffffffffffffff167fccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e84848888866040516101f5959493929190610d89565b60405180910390a25050505050565b8067ffffffffffffffff167f0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e60405160405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167fc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b685985858460405161028893929190610dd2565b60405180910390a250505050565b3373ffffffffffffffffffffffffffffffffffffffff167f1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e68883836040516102de929190610e04565b60405180910390a2505050565b3373ffffffffffffffffffffffffffffffffffffffff167f259235c230d57def1521657e7c7951d3b385e76193378bc87ef6b56bc2ec3548826040516103319190610e43565b60405180910390a250565b60016000808282829054906101000a900467ffffffffffffffff166103619190610e8d565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055503373ffffffffffffffffffffffffffffffffffffffff1660008054906101000a900467ffffffffffffffff1667ffffffffffffffff167fd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f48585856040516103f093929190610ed8565b60405180910390a3505050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f84011261043657610435610411565b5b8235905067ffffffffffffffff81111561045357610452610416565b5b60208301915083600182028301111561046f5761046e61041b565b5b9250929050565b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6104bf82610476565b810181811067ffffffffffffffff821117156104de576104dd610487565b5b80604052505050565b60006104f16103fd565b90506104fd82826104b6565b919050565b600067ffffffffffffffff82111561051d5761051c610487565b5b602082029050602081019050919050565b600067ffffffffffffffff82169050919050565b61054b8161052e565b811461055657600080fd5b50565b60008135905061056881610542565b92915050565b600061058161057c84610502565b6104e7565b905080838252602082019050602084028301858111156105a4576105a361041b565b5b835b818110156105cd57806105b98882610559565b8452602084019350506020810190506105a6565b5050509392505050565b600082601f8301126105ec576105eb610411565b5b81356105fc84826020860161056e565b91505092915050565b6000819050919050565b61061881610605565b811461062357600080fd5b50565b6000813590506106358161060f565b92915050565b600080fd5b600063ffffffff82169050919050565b61065981610640565b811461066457600080fd5b50565b60008135905061067681610650565b92915050565b60008115159050919050565b6106918161067c565b811461069c57600080fd5b50565b60008135905061
06ae81610688565b92915050565b600060a082840312156106ca576106c961063b565b5b6106d460a06104e7565b905060006106e484828501610667565b60008301525060206106f884828501610559565b602083015250604061070c84828501610559565b60408301525060606107208482850161069f565b606083015250608061073484828501610626565b60808301525092915050565b6000806000806000806000610120888a0312156107605761075f610407565b5b600088013567ffffffffffffffff81111561077e5761077d61040c565b5b61078a8a828b01610420565b9750975050602088013567ffffffffffffffff8111156107ad576107ac61040c565b5b6107b98a828b016105d7565b955050604088013567ffffffffffffffff8111156107da576107d961040c565b5b6107e68a828b01610420565b945094505060606107f98a828b01610626565b925050608061080a8a828b016106b4565b91505092959891949750929550565b60008083601f84011261082f5761082e610411565b5b8235905067ffffffffffffffff81111561084c5761084b610416565b5b6020830191508360208202830111156108685761086761041b565b5b9250929050565b600080600080600060e0868803121561088b5761088a610407565b5b600086013567ffffffffffffffff8111156108a9576108a861040c565b5b6108b588828901610420565b9550955050602086013567ffffffffffffffff8111156108d8576108d761040c565b5b6108e488828901610819565b935093505060406108f7888289016106b4565b9150509295509295909350565b60006020828403121561091a57610919610407565b5b600061092884828501610559565b91505092915050565b60008060008060e0858703121561094b5761094a610407565b5b600085013567ffffffffffffffff8111156109695761096861040c565b5b61097587828801610819565b9450945050602061098887828801610626565b9250506040610999878288016106b4565b91505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006109d0826109a5565b9050919050565b6109e0816109c5565b81146109eb57600080fd5b50565b6000813590506109fd816109d7565b92915050565b600080600060e08486031215610a1c57610a1b610407565b5b6000610a2a868287016109ee565b935050602084013567ffffffffffffffff811115610a4b57610a4a61040c565b5b610a57868287016105d7565b9250506040610a68868287016106b4565b9150509250925092565b600060208284031215610a8857610a87610407565b5b6000610a96848285016109ee565b91505092915050565b600080600060408486031215610ab857610ab7610407565b5b600084013567ffffffffffffffff811115610ad657610ad561040c565b5b610ae286828701610420565b93509350506020610af586828701610626565b9150509250925092565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610b348161052e565b82525050565b6000610b468383610b2b565b60208301905092915050565b6000602082019050919050565b6000610b6a82610aff565b610b748185610b0a565b9350610b7f83610b1b565b8060005b83811015610bb0578151610b978882610b3a565b9750610ba283610b52565b925050600181019050610b83565b5085935050505092915050565b600082825260208201905092915050565b82818337600083830152505050565b6000610be98385610bbd565b9350610bf6838584610bce565b610bff83610476565b840190509392505050565b610c1381610640565b82525050565b610c228161067c565b82525050565b610c3181610605565b82525050565b60a082016000820151610c4d6000850182610c0a565b506020820151610c606020850182610b2b565b506040820151610c736040850182610b2b565b506060820151610c866060850182610c19565b506080820151610c996080850182610c28565b50505050565b6000610100820190508181036000830152610cba8189610b5f565b90508181036020830152610ccf818789610bdd565b90508181036040830152610ce4818587610bdd565b9050610cf36060830184610c37565b979650505050505050565b6000819050919050565b6000610d176020840184610559565b905092915050565b6000602082019050919050565b6000610d388385610b0a565b9350610d4382610cfe565b8060005b85811015610d7c57610d598284610d08565b610d638882610b3a565b9750610d6e83610d1f565b925050600181019050610d47565b5085925050509392505050565b600060e0820190508181036000830
152610da4818789610d2c565b90508181036020830152610db9818587610bdd565b9050610dc86040830184610c37565b9695505050505050565b600060c0820190508181036000830152610ded818587610d2c565b9050610dfc6020830184610c37565b949350505050565b600060c0820190508181036000830152610e1e8185610b5f565b9050610e2d6020830184610c37565b9392505050565b610e3d816109c5565b82525050565b6000602082019050610e586000830184610e34565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610e988261052e565b9150610ea38361052e565b9250828201905067ffffffffffffffff811115610ec357610ec2610e5e565b5b92915050565b610ed281610605565b82525050565b60006040820190508181036000830152610ef3818587610bdd565b9050610f026020830184610ec9565b94935050505056fea26469706673582212206464f7d32909b03e1e16f822f4ba73e56f9b875dfda6cb13f3fc97c182c5e43664736f6c63430008120033", } // SimcontractABI is the input ABI used to generate the binding from. diff --git a/eth/simulator/simcontract/simcontract.sol b/eth/simulator/simcontract/simcontract.sol index 23277e23e2..9325802822 100644 --- a/eth/simulator/simcontract/simcontract.sol +++ b/eth/simulator/simcontract/simcontract.sol @@ -52,20 +52,43 @@ contract Callable { _operatorId += 1; emit OperatorAdded(_operatorId, msg.sender, publicKey, fee); } - function removeOperator(uint64 operatorId) public {emit OperatorRemoved(operatorId);} + + function removeOperator(uint64 operatorId) public { + emit OperatorRemoved(operatorId); + } + function registerValidator( bytes calldata publicKey, uint64[] memory operatorIds, bytes calldata sharesData, uint256 amount, Cluster memory cluster - ) public { emit ValidatorAdded(msg.sender, operatorIds, publicKey, sharesData, cluster);} + ) public { + emit ValidatorAdded(msg.sender, operatorIds, publicKey, sharesData, cluster); + } + function removeValidator( bytes calldata publicKey, uint64[] calldata operatorIds, Cluster memory cluster - ) public {emit ValidatorRemoved(msg.sender, operatorIds, publicKey, cluster);} - function liquidate(address clusterOwner, uint64[] memory operatorIds, Cluster memory cluster) public {emit ClusterLiquidated(clusterOwner, operatorIds, cluster);} - function reactivate(uint64[] calldata operatorIds, uint256 amount, Cluster memory cluster) public {emit ClusterReactivated(msg.sender, operatorIds, cluster);} + ) public { + emit ValidatorRemoved(msg.sender, operatorIds, publicKey, cluster); + } + + function liquidate(address clusterOwner, + uint64[] memory operatorIds, + Cluster memory cluster + ) public { + emit ClusterLiquidated(msg.sender, operatorIds, cluster); + } + + function reactivate( + uint64[] calldata operatorIds, + uint256 amount, + Cluster memory cluster + ) public { + emit ClusterReactivated(msg.sender, operatorIds, cluster); + } + function setFeeRecipientAddress(address recipientAddress) public {emit FeeRecipientAddressUpdated(msg.sender, recipientAddress);} } From 1bcd6bf0048b84734bc1e67ad94c320d818da387 Mon Sep 17 00:00:00 2001 From: Nikita Kryuchkov Date: Tue, 3 Oct 2023 17:07:24 +0400 Subject: [PATCH 13/54] Message validation (#1066) * Fix issues in tests * Fix linter * Fix TestSSVMapping * Attempt to fix fetchLogsInBatches * Revert "Attempt to fix fetchLogsInBatches" This reverts commit 96006c248939b3e46fd04286d75fcb0fe82ef904. * Attempt to fix fetchLogsInBatches * Revert "Attempt to fix fetchLogsInBatches" This reverts commit 5a3d9de0de8c11a9669a8a235b475f0406a79140. * Revert "Fix TestSSVMapping" This reverts commit c3263c12e53b8e94ec5ab94fdbc49a093e3d7764. 
* Revert "Fix linter" This reverts commit 8f72bb280ef37660be00c347fefd18e3ac2311a8. * Revert "Fix issues in tests" This reverts commit 0720abb93aea2a2eb819f2f429d4b168c604f311. * Change batch verifier params * Disable signature check * Revert "Disable signature check" This reverts commit 311c7227dc6edfc49c6e626d974724427b120665. * Batch size 10 * Disable signature check * Use BLSU for signature verification * BLSU aggregate * Disable partial signature check * Disable consensus signature check * Fix linter * Revert "BLSU aggregate" * Revert "Use BLSU for signature verification" This reverts commit 6d4c15f81f2bf0f9e7e91210e932eb9367c64619. * AggregateVerify for partial signature * Simplify duplicated signer check * Fix error text * Fix check order * Fix issues in tests * Revert "Fix issues in tests" This reverts commit b40355cee3e9166557d6fdc748f8a55aae0fa2d2. * Move deployment to 5-8 * Fix some tests * Fix data race * Revert "Fix data race" This reverts commit e3596a20af593de1a92a88b44e8daf52d55a42c6. * Attempt to fix data race * Revert "Attempt to fix data race" This reverts commit 38cce5f8fa68fcf286e77d8d9e85f0b017a33388. * Attempt to fix data race [2] * Fix spec tests * Fix a data race * Fix a data race in tests bootstrapper * Fix differ config * Change message size metric to histogram * Fix some tests * Fix linter * Fix unit tests * More tests * Improve tests * More tests * More tests [2] * More tests [3] * Fix incorrect usage of errors.As in tests * More tests * Deploy to 1-4 instead of 5-8 * Add logs with message processing duration * Fix message validation duration buckets * Add logs/metrics with signature check duration * Disable duration logs * Use single verification * Revert "Use single verification" This reverts commit cae01d2371dd7a010d260299ea1b6e7791147199. * More tests * Fix max duties check * More tests * Fix duty count validation * More tests * More tests * Fix duty count bug * Add message queue metrics * Fix a typo * Revert "Fix message validation duration buckets" This reverts commit 269340ba935ff2d3ba10f8a83f22b0b549ef8008. * Fix validation duration buckets * Cache serialized signature * Fix variable name * Revert "Fix variable name" This reverts commit 15345a2a34836e6c84d0f36910f0d8e0c4c77f60. * Revert "Cache serialized signature" This reverts commit 20245e2895b7cea4b94cda2abcd008055e64897e. * Queue len/cap as gauge * Add message validation dashboard * Enable signature checks * Disable signature verification for non-committee non-decided * Fix bug with committee check * Verify partial signatures only if in committee * Fix check * Simplify code * Add metrics if in committee * Fix value of InvalidMessageDeliveriesWeight * Deploy to 5-8 instead of 1-4 * Implement duty fetcher for message validation * Get rid of optsTemplate in validatorsMap * Make validators map exported and pass it to duty fetcher * Fix panic due to context passing * Simplify saving validators * Debug panic * Revert "Debug panic" This reverts commit 76c273f83e70252c4913ae3e1dc6b3696abf724f. * Revert "Simplify saving validators" This reverts commit b24d7044cbf4fbaef41496cf21119b66dc0b54ca. * Revert "Fix panic due to context passing" This reverts commit 44b47f30852f47eba7936904f08518f6271aa748. * Revert "Make validators map exported and pass it to duty fetcher" This reverts commit 82ee64d1e5e9e5658e74b2590bf9a07d5b92337d. * Revert "Get rid of optsTemplate in validatorsMap" This reverts commit 20cc1b7bc34415794658f5475e25ff81409ef244. 
* Revert "Implement duty fetcher for message validation" This reverts commit dabc10ab3276f615f3cb714e8a3e33c0c276ae53. * Implement duty fetcher for message validation * Fix context panic * Fix signature in message validation tests * Get rid of optsTemplate in validatorsMap * Fix condition * Make validators map exported and pass it to duty fetcher * Revert "Make validators map exported and pass it to duty fetcher" This reverts commit 65729f3fd9e6b2a5f807bbb3d8fc3a51d8ca7e3d. * Fix logic of validating beacon duty, add test * Make validators map exported and pass it to duty fetcher * Start duty fetcher * Revert "Start duty fetcher" This reverts commit cf3f51ff5613512ce15f13fb9280a3862298f596. * Revert "Make validators map exported and pass it to duty fetcher" This reverts commit 563143c676e5b7e8d7557e7b941335c55184dda5. * Start duty fetcher * Attempt to make validators map exported * Move ActiveValidatorIndices to ValidatorsMap * Use logger in DutyFetcher * Pass validators map from node to validator controller * Enable duty fetcher * Revert "Enable duty fetcher" This reverts commit 3648788e6df076ec0e1e2abc1697a20133dca3fd. * Extract validator creation logic from validators map * Simplify creating validator * Delete redundant comment * Fix linter * Init and start duty fetcher but don't pass to message validator * Fix fetcher bug * Use fetcher * go mod tidy * Duty fetcher tests * Remove redundant file * Fix imports * Disable deployment * Enable signature check for all messages * Fix issues after merging * Fix .gitlab-ci.yml * Enable message validation in sync * Revert "Enable signature check for all messages" This reverts commit 8c692b9e37c4c806747b5186ee6d61a073034f4f. * Deploy to 5-8 * Try blst verify * Try to fix error * Enable signature check for all messages * Revert "Enable signature check for all messages" This reverts commit b0849c865faf0ffeb37533d0145b05421e058b2e. * Revert "Try to fix error" This reverts commit 0c9d35f6a81ddec3fa4a12b1d604f6270c3f2adb. * Revert "Try blst verify" This reverts commit 38bbe351549c7d8aa3493890ebe45cb1fc8b50b3. * Flag for signature verification in message validation * Optimization for SingleVerifyByOperators * Remove outdated comment * Delete unused errors * Try to fix bug with ErrTooManyDutiesPerEpoch * Code review requests * Try to fix bug with ErrTooManyDuties * Delete ErrUnexpectedMessageOrder * Delete ErrDecidedSignersSequence * Try RSA verification instead of BLS in most places * Enable signature check in message validation * Revert "Enable signature check in message validation" This reverts commit ef1a05cdc620469ce9c007c4cedf9d79a82b799c. * Revert "Try RSA verification instead of BLS in most places" This reverts commit 0e16b111495d91b3ef255f2f9ebe93f703b40991. * Don't check for signature check flag in protocol * Revert "Don't check for signature check flag in protocol" This reverts commit 9f46933fede0b87443208ba6f339061fddc68d81. * Add logs for config * Debugging * Debugging [2] * Debugging [3] * Debugging [4] * Debugging [5] * Debugging [6] * Add passing signature check for non-committee validators * Revert "Debugging [6]" This reverts commit d4d99d0a6d2bd7bb10f2df0024adcbbd0c4dd704. * Revert "Debugging [5]" This reverts commit 425f30b266984a5f56a8e4ef4afff4ca90b8637b. * Revert "Debugging [4]" This reverts commit a5ade77748eb364a1ac5a9e97387300a0ab1f7be. * Revert "Debugging [3]" This reverts commit 97398bc1397196a781a3e29ee2317684dd46e0e7. * Revert "Debugging [2]" This reverts commit 91b450710e0eacab3a9d236fe4062cb1c92dbb7d. 
* Revert "Debugging" This reverts commit adb769f22885157adce86ec625ac0cef059920ae. * Revert "Add logs for config" This reverts commit f016ace19012d23e23e14cd4363233c8debe139b. * Try RSA verification instead of BLS in most places * Enable signature check in message validation * Revert "Try RSA verification instead of BLS in most places" This reverts commit 29a196e556c88a14cbbe4dd84175f3399cfa956d. * Revert "Enable signature check in message validation" This reverts commit 080d8765f32f26bd617022e0ad417972d7a95a12. * Try to fix bug with ErrTooManyDutiesPerEpoch * fetch metadata for all validators * LA Audit (#1136) * Audits Directory * add to README * Update dashboard * Revert "fetch metadata for all validators" This reverts commit 68df1770b1a489911a1e1c1a9c43d858a546de74. * fetch metadata for all validators * Fix differ * Attempt to fix duty fetcher * Fix panic * Fix duty fetcher tests * Fix event handler tests * Add ErrNoShareMetadata * Add test for ErrNoShareMetadata * Revert differ change * Get rid of type assertions/switches * Extract duty storage from duty scheduler * Fetch all duties, process only committee duties for proposer * Fetch all duties, process only committee duties for sync committee * Deploy to 1-4 as well * Fix potential nil ptr dereference * Forbid consensus message with validator registration role * Revert message queue changes * Check message counts for partial signature messages * Add role and round to metrics * Update Grafana * Update Grafana * WIP on rejection/banning test * rename `dutystorage` to `dutystore` * refactor * refator MessageValidator to an interface * update Grafana dashboard * Don't start validator registration duty if not attesting * Test improvements from Andrew * Fix docker build * Update bind-tools * Cleanup * rename `CheckSignature` to `VerifySignatures` * Enable nil config checks * Create qbft config in message validation * Revert "Enable nil config checks" This reverts commit 5b547d92518a5e0ea6b5024f2e5a7e2c4f49f4c3. * Fix panic * Fix panic [2] * Add signature check option * Add godoc * Disable deployment * Reset epoch/period for previous one instead of current one * Revert "Disable deployment" This reverts commit cb585d3e9ba607447cb473db2ccf1d63f6165e87. * Fix duplication bug * keep old duties * Revert "Reset epoch/period for previous one instead of current one" This reverts commit 0a803a02ed4e7cda7b90d1924b2385198df833eb. * Disable deployment * Revert "Disable deployment" This reverts commit cfd23f8ee8b35f9809420c155a6027d15e72ac6d. * Cleanup a TODO * Disable deployment * Revert "Disable deployment" This reverts commit cc0ab52b3ef773ef983907734ef08b63008e98c5. 
* Pass context instead of logger to message router * Add a test for GetSlotEndTime * performance optimization * change YAML representation of msg validation sig flag & add log * extract batch verifier to separate file * approve spec alignment * fix test * Delete debug.log * Disable deployment * Disable batch verifier * Fix compilation * Delete batch verifier * Simplify GetSignerState * Add a note about NTP * go mod tidy --------- Co-authored-by: moshe-blox Co-authored-by: moshe-blox <89339422+moshe-blox@users.noreply.github.com> --- Dockerfile | 3 +- cli/operator/node.go | 77 +- docs/OPERATOR_GETTING_STARTED.md | 3 +- eth/eventhandler/event_handler_test.go | 2 + eth/eventhandler/task_executor_test.go | 2 +- eth/eventsyncer/event_syncer_test.go | 2 + go.mod | 2 +- integration/qbft/tests/msg_router.go | 15 +- integration/qbft/tests/scenario_test.go | 4 +- logging/names.go | 1 + logging/testing.go | 5 +- message/validation/consensus_validation.go | 434 ++++ .../validation/consensus_validation_test.go | 104 + message/validation/errors.go | 100 + message/validation/message_counts.go | 156 ++ message/validation/metrics.go | 38 + message/validation/partial_validation.go | 251 ++ message/validation/qbft_config.go | 53 + message/validation/signer_state.go | 45 + message/validation/validation.go | 556 +++++ message/validation/validation_test.go | 1774 ++++++++++++++ .../grafana/dashboard_msg_validation.json | 2175 +++++++++++++++++ .../metricsreporter/metrics_reporter.go | 239 +- network/network.go | 6 +- network/p2p/config.go | 6 + network/p2p/p2p.go | 29 +- network/p2p/p2p_pubsub.go | 25 +- network/p2p/p2p_setup.go | 18 +- network/p2p/p2p_sync.go | 21 +- network/p2p/p2p_test.go | 26 +- network/p2p/test_utils.go | 3 + network/syncing/syncer.go | 40 +- network/syncing/syncer_test.go | 3 +- network/topics/controller.go | 48 +- network/topics/controller_test.go | 287 ++- network/topics/metrics.go | 26 +- network/topics/msg_validator.go | 67 - network/topics/msg_validator_test.go | 108 +- network/topics/params/gossipsub.go | 2 +- network/topics/params/topic_score.go | 5 +- network/topics/pubsub.go | 51 +- network/topics/scoring.go | 2 +- operator/duties/attester.go | 47 +- operator/duties/attester_test.go | 192 +- operator/duties/base_handler.go | 21 - operator/duties/dutystore/duties.go | 97 + operator/duties/dutystore/store.go | 19 + operator/duties/dutystore/sync_committee.go | 76 + operator/duties/mocks/scheduler.go | 26 +- operator/duties/proposer.go | 50 +- operator/duties/proposer_test.go | 110 +- operator/duties/scheduler.go | 16 +- .../{synccommittee.go => sync_committee.go} | 75 +- ...mmittee_test.go => sync_committee_test.go} | 44 +- operator/duties/validatorregistration.go | 2 +- operator/node.go | 11 +- operator/validator/controller.go | 142 +- operator/validator/controller_test.go | 67 +- operator/validator/metrics.go | 28 - operator/validator/mocks/controller.go | 26 +- operator/validator/router.go | 24 +- operator/validator/router_test.go | 20 +- operator/validator/task_executor.go | 8 +- operator/validator/validators_map.go | 126 - operator/validatorsmap/validators_map.go | 110 + .../v2/blockchain/beacon/mocks/network.go | 14 + protocol/v2/blockchain/beacon/network.go | 6 + protocol/v2/blockchain/beacon/network_test.go | 19 + protocol/v2/qbft/config.go | 23 +- protocol/v2/qbft/controller/future_msg.go | 7 +- protocol/v2/qbft/instance/commit.go | 7 +- protocol/v2/qbft/instance/prepare.go | 6 +- protocol/v2/qbft/instance/proposal.go | 34 +- protocol/v2/qbft/instance/round_change.go | 15 
+- protocol/v2/qbft/roundtimer/timer.go | 12 +- .../v2/qbft/spectest/qbft_mapping_test.go | 16 +- protocol/v2/qbft/testing/utils.go | 13 +- protocol/v2/queue/worker/message_worker.go | 19 +- .../v2/queue/worker/message_worker_test.go | 14 +- .../v2/ssv/queue/message_prioritizer_test.go | 3 +- protocol/v2/ssv/queue/messages.go | 14 +- protocol/v2/ssv/queue/metrics.go | 37 +- protocol/v2/ssv/queue/queue_test.go | 30 +- protocol/v2/ssv/runner/runner.go | 3 +- protocol/v2/ssv/runner/runner_signatures.go | 17 +- .../v2/ssv/spectest/msg_processing_type.go | 7 +- protocol/v2/ssv/spectest/ssv_mapping_test.go | 188 +- .../sync_committee_aggregator_proof_type.go | 2 +- protocol/v2/ssv/validator/metrics.go | 45 + .../v2/ssv/validator/msgqueue_consumer.go | 16 +- .../ssv/validator/non_committee_validator.go | 10 +- protocol/v2/ssv/validator/opts.go | 4 + protocol/v2/ssv/validator/timer.go | 2 +- protocol/v2/ssv/validator/validator.go | 37 +- protocol/v2/types/bls.go | 2 +- protocol/v2/types/crypto.go | 5 - registry/storage/shares.go | 7 + scripts/spec-alignment/differ.config.yaml | 5 +- 98 files changed, 7618 insertions(+), 1172 deletions(-) create mode 100644 message/validation/consensus_validation.go create mode 100644 message/validation/consensus_validation_test.go create mode 100644 message/validation/errors.go create mode 100644 message/validation/message_counts.go create mode 100644 message/validation/metrics.go create mode 100644 message/validation/partial_validation.go create mode 100644 message/validation/qbft_config.go create mode 100644 message/validation/signer_state.go create mode 100644 message/validation/validation.go create mode 100644 message/validation/validation_test.go create mode 100644 monitoring/grafana/dashboard_msg_validation.json delete mode 100644 network/topics/msg_validator.go create mode 100644 operator/duties/dutystore/duties.go create mode 100644 operator/duties/dutystore/store.go create mode 100644 operator/duties/dutystore/sync_committee.go rename operator/duties/{synccommittee.go => sync_committee.go} (86%) rename operator/duties/{synccommittee_test.go => sync_committee_test.go} (94%) delete mode 100644 operator/validator/validators_map.go create mode 100644 operator/validatorsmap/validators_map.go create mode 100644 protocol/v2/blockchain/beacon/network_test.go create mode 100644 protocol/v2/ssv/validator/metrics.go diff --git a/Dockerfile b/Dockerfile index 0faa9e340e..44c362dcfc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,6 @@ RUN apt-get update && \ git=1:2.39.2-1.1 \ zip=3.0-13 \ unzip=6.0-28 \ - wget=1.21.3-1+b2 \ g++=4:12.2.0-3 \ gcc-aarch64-linux-gnu=4:12.2.0-3 \ bzip2=1.0.8-5+b1 \ @@ -61,7 +60,7 @@ RUN apk -v --update add \ ca-certificates=20230506-r0 \ bash=5.2.15-r5 \ make=4.4.1-r1 \ - bind-tools=9.18.16-r0 && \ + bind-tools=9.18.19-r0 && \ rm /var/cache/apk/* COPY --from=builder /go/bin/ssvnode /go/bin/ssvnode diff --git a/cli/operator/node.go b/cli/operator/node.go index 4dd14f558a..d280bd662e 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -19,7 +19,6 @@ import ( "github.com/bloxapp/ssv/api/handlers" apiserver "github.com/bloxapp/ssv/api/server" - "github.com/bloxapp/ssv/beacon/goclient" global_config "github.com/bloxapp/ssv/cli/config" "github.com/bloxapp/ssv/ekm" @@ -34,6 +33,7 @@ import ( ssv_identity "github.com/bloxapp/ssv/identity" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/migrations" 
"github.com/bloxapp/ssv/monitoring/metrics" "github.com/bloxapp/ssv/monitoring/metricsreporter" @@ -42,9 +42,11 @@ import ( "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/nodeprobe" "github.com/bloxapp/ssv/operator" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/slot_ticker" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validatorsmap" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -60,6 +62,10 @@ type KeyStore struct { PasswordFile string `yaml:"PasswordFile" env:"PASSWORD_FILE" env-description:"Password for operator private key file decryption"` } +type MessageValidation struct { + VerifySignatures bool `yaml:"VerifySignatures" env:"MESSAGE_VALIDATION_VERIFY_SIGNATURES" env-default:"false" env-description:"Experimental feature to verify signatures in pubsub's message validation instead of in consensus protocol."` +} + type config struct { global_config.GlobalConfig `yaml:"global"` DBOptions basedb.Options `yaml:"db"` @@ -72,13 +78,11 @@ type config struct { MetricsAPIPort int `yaml:"MetricsAPIPort" env:"METRICS_API_PORT" env-description:"Port to listen on for the metrics API."` EnableProfile bool `yaml:"EnableProfile" env:"ENABLE_PROFILE" env-description:"flag that indicates whether go profiling tools are enabled"` NetworkPrivateKey string `yaml:"NetworkPrivateKey" env:"NETWORK_PRIVATE_KEY" env-description:"private key for network identity"` - - WsAPIPort int `yaml:"WebSocketAPIPort" env:"WS_API_PORT" env-description:"Port to listen on for the websocket API."` - WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` - - SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` - - LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` + WsAPIPort int `yaml:"WebSocketAPIPort" env:"WS_API_PORT" env-description:"Port to listen on for the websocket API."` + WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` + SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` + LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` + MessageValidation MessageValidation `yaml:"MessageValidation"` } var cfg config @@ -97,6 +101,11 @@ var StartNodeCmd = &cobra.Command{ log.Fatal("could not create logger", err) } defer logging.CapturePanic(logger) + + metricsReporter := metricsreporter.New( + metricsreporter.WithLogger(logger), + ) + networkConfig, err := setupSSVNetwork(logger) if err != nil { logger.Fatal("could not setup network", zap.Error(err)) @@ -128,23 +137,9 @@ var StartNodeCmd = &cobra.Command{ return currentEpoch >= cfg.P2pNetworkConfig.PermissionedActivateEpoch && currentEpoch < cfg.P2pNetworkConfig.PermissionedDeactivateEpoch } - cfg.P2pNetworkConfig.Permissioned = permissioned - cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) 
- cfg.P2pNetworkConfig.NodeStorage = nodeStorage - cfg.P2pNetworkConfig.OperatorID = format.OperatorID(operatorData.PublicKey) - cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode - cfg.P2pNetworkConfig.Network = networkConfig - - p2pNetwork := setupP2P(logger, db) - slotTicker := slot_ticker.NewTicker(cmd.Context(), networkConfig) - metricsReporter := metricsreporter.New( - metricsreporter.WithLogger(logger), - ) - cfg.ConsensusClient.Context = cmd.Context() - cfg.ConsensusClient.Graffiti = []byte("SSV.Network") cfg.ConsensusClient.GasLimit = spectypes.DefaultGasLimit cfg.ConsensusClient.Network = networkConfig.Beacon.GetNetwork() @@ -166,6 +161,36 @@ var StartNodeCmd = &cobra.Command{ logger.Fatal("could not connect to execution client", zap.Error(err)) } + cfg.P2pNetworkConfig.Permissioned = permissioned + cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) + cfg.P2pNetworkConfig.NodeStorage = nodeStorage + cfg.P2pNetworkConfig.OperatorID = format.OperatorID(operatorData.PublicKey) + cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode + cfg.P2pNetworkConfig.Network = networkConfig + + validatorsMap := validatorsmap.New(cmd.Context()) + + dutyStore := dutystore.New() + cfg.SSVOptions.DutyStore = dutyStore + + messageValidator := validation.NewMessageValidator( + networkConfig, + validation.WithShareStorage(nodeStorage.Shares()), + validation.WithLogger(logger), + validation.WithMetrics(metricsReporter), + validation.WithDutyStore(dutyStore), + validation.WithOwnOperatorID(operatorData.ID), + validation.WithSignatureVerification(cfg.MessageValidation.VerifySignatures), + ) + + cfg.P2pNetworkConfig.Metrics = metricsReporter + cfg.P2pNetworkConfig.MessageValidator = messageValidator + cfg.SSVOptions.ValidatorOptions.MessageValidator = messageValidator + // if signature check is enabled in message validation then it's disabled in validator controller and vice versa + cfg.SSVOptions.ValidatorOptions.VerifySignatures = !cfg.MessageValidation.VerifySignatures + + p2pNetwork := setupP2P(logger, db) + cfg.SSVOptions.Context = cmd.Context() cfg.SSVOptions.DB = db cfg.SSVOptions.BeaconNode = consensusClient @@ -178,6 +203,7 @@ var StartNodeCmd = &cobra.Command{ cfg.SSVOptions.ValidatorOptions.Network = p2pNetwork cfg.SSVOptions.ValidatorOptions.Beacon = consensusClient cfg.SSVOptions.ValidatorOptions.KeyManager = keyManager + cfg.SSVOptions.ValidatorOptions.ValidatorsMap = validatorsMap cfg.SSVOptions.ValidatorOptions.ShareEncryptionKeyProvider = nodeStorage.GetPrivateKey cfg.SSVOptions.ValidatorOptions.OperatorData = operatorData @@ -209,10 +235,10 @@ var StartNodeCmd = &cobra.Command{ cfg.SSVOptions.ValidatorOptions.StorageMap = storageMap cfg.SSVOptions.ValidatorOptions.Metrics = metricsReporter + cfg.SSVOptions.Metrics = metricsReporter validatorCtrl := validator.NewController(logger, cfg.SSVOptions.ValidatorOptions) cfg.SSVOptions.ValidatorController = validatorCtrl - cfg.SSVOptions.Metrics = metricsReporter operatorNode = operator.New(logger, cfg.SSVOptions, slotTicker) @@ -477,10 +503,7 @@ func setupSSVNetwork(logger *zap.Logger) (networkconfig.NetworkConfig, error) { return networkConfig, nil } -func setupP2P( - logger *zap.Logger, - db basedb.Database, -) network.P2PNetwork { +func setupP2P(logger *zap.Logger, db basedb.Database) network.P2PNetwork { istore := ssv_identity.NewIdentityStore(db) netPrivKey, err := istore.SetupNetworkKey(logger, 
cfg.NetworkPrivateKey)
 	if err != nil {
diff --git a/docs/OPERATOR_GETTING_STARTED.md b/docs/OPERATOR_GETTING_STARTED.md
index f46fdd08b1..d99c30ae52 100644
--- a/docs/OPERATOR_GETTING_STARTED.md
+++ b/docs/OPERATOR_GETTING_STARTED.md
@@ -148,7 +148,8 @@ OperatorPrivateKey: LS0tLS...
 
 ### 6. Start SSV Node in Docker
 
-Run the docker image in the same folder you created the `config.yaml`:
+Before starting, make sure the clock is synced with NTP servers.
+Then, run the docker image in the same folder where you created the `config.yaml`:
 
 ```shell
 $ docker run -d --restart unless-stopped --name=ssv_node -e CONFIG_PATH=./config.yaml -p 13001:13001 -p 12001:12001/udp -v $(pwd)/config.yaml:/config.yaml -v $(pwd):/data --log-opt max-size=500m --log-opt max-file=10 -it 'bloxstaking/ssv-node:latest' make BUILD_PATH=/go/bin/ssvnode start-node \
diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go
index ec33d5ed50..fa55d0dab6 100644
--- a/eth/eventhandler/event_handler_test.go
+++ b/eth/eventhandler/event_handler_test.go
@@ -39,6 +39,7 @@ import (
 	operatorstorage "github.com/bloxapp/ssv/operator/storage"
 	"github.com/bloxapp/ssv/operator/validator"
 	"github.com/bloxapp/ssv/operator/validator/mocks"
+	"github.com/bloxapp/ssv/operator/validatorsmap"
 	"github.com/bloxapp/ssv/protocol/v2/blockchain/beacon"
 	registrystorage "github.com/bloxapp/ssv/registry/storage"
 	"github.com/bloxapp/ssv/storage/basedb"
@@ -1225,6 +1226,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, ne
 		KeyManager:    keyManager,
 		StorageMap:    storageMap,
 		OperatorData:  operatorData,
+		ValidatorsMap: validatorsmap.New(ctx),
 	})
 	contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil)
diff --git a/eth/eventhandler/task_executor_test.go b/eth/eventhandler/task_executor_test.go
index 49b34f5ccf..a735c53dc9 100644
--- a/eth/eventhandler/task_executor_test.go
+++ b/eth/eventhandler/task_executor_test.go
@@ -190,7 +190,7 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) {
 	}
 	happyFlow := []string{
 		"successfully setup operator keys",
-		"setting validator controller",
+		"setting up validator controller",
 		"malformed event: failed to verify signature",
 		"processed events from block",
 	}
diff --git a/eth/eventsyncer/event_syncer_test.go b/eth/eventsyncer/event_syncer_test.go
index 4cd2e73e68..9b500fe091 100644
--- a/eth/eventsyncer/event_syncer_test.go
+++ b/eth/eventsyncer/event_syncer_test.go
@@ -11,6 +11,7 @@ import (
 	"github.com/bloxapp/ssv/eth/contract"
 	"github.com/bloxapp/ssv/eth/simulator"
+	"github.com/bloxapp/ssv/operator/validatorsmap"
 	"github.com/bloxapp/ssv/utils/rsaencryption"
 	"github.com/ethereum/go-ethereum/accounts/abi"
@@ -152,6 +153,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger) *e
 		DB:              db,
 		RegistryStorage: nodeStorage,
 		OperatorData:    operatorData,
+		ValidatorsMap:   validatorsmap.New(ctx),
 	})
 	contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil)
diff --git a/go.mod b/go.mod
index 5fa7730cf6..eebec2fb8a 100644
--- a/go.mod
+++ b/go.mod
@@ -37,6 +37,7 @@ require (
 	github.com/sourcegraph/conc v0.3.0
 	github.com/spf13/cobra v1.7.0
 	github.com/stretchr/testify v1.8.4
+	github.com/wealdtech/go-eth2-types/v2 v2.8.1
 	github.com/wealdtech/go-eth2-util v1.8.1
 	github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3
 	go.uber.org/multierr v1.11.0
@@ -192,7 +193,6 @@ require (
 	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
 	github.com/urfave/cli/v2 v2.24.1 // indirect
github.com/wealdtech/go-bytesutil v1.2.1 // indirect - github.com/wealdtech/go-eth2-types/v2 v2.8.1 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect diff --git a/integration/qbft/tests/msg_router.go b/integration/qbft/tests/msg_router.go index bf3b667e98..dda7b7c243 100644 --- a/integration/qbft/tests/msg_router.go +++ b/integration/qbft/tests/msg_router.go @@ -1,21 +1,26 @@ package tests import ( - spectypes "github.com/bloxapp/ssv-spec/types" - protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" + "context" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" + protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" ) type msgRouter struct { + logger *zap.Logger validator *protocolvalidator.Validator } -func (m *msgRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { - m.validator.HandleMessage(logger, &message) +func (m *msgRouter) Route(_ context.Context, message *queue.DecodedSSVMessage) { + m.validator.HandleMessage(m.logger, message) } -func newMsgRouter(v *protocolvalidator.Validator) *msgRouter { +func newMsgRouter(logger *zap.Logger, v *protocolvalidator.Validator) *msgRouter { return &msgRouter{ validator: v, + logger: logger, } } diff --git a/integration/qbft/tests/scenario_test.go b/integration/qbft/tests/scenario_test.go index 5fbf6c89b9..55736dd419 100644 --- a/integration/qbft/tests/scenario_test.go +++ b/integration/qbft/tests/scenario_test.go @@ -84,7 +84,7 @@ func (s *Scenario) Run(t *testing.T, role spectypes.BeaconRole) { copy(pk[:], getKeySet(s.Committee).ValidatorPK.Serialize()) ssvMsg, err := validator.CreateDutyExecuteMsg(duty, pk, networkconfig.TestNetwork.Domain) require.NoError(t, err) - dec, err := queue.DecodeSSVMessage(logger, ssvMsg) + dec, err := queue.DecodeSSVMessage(ssvMsg) require.NoError(t, err) s.validators[id].Queues[role].Q.Push(dec) @@ -218,7 +218,7 @@ func createValidator(t *testing.T, pCtx context.Context, id spectypes.OperatorID options.DutyRunners = validator.SetupRunners(ctx, logger, options) val := protocolvalidator.NewValidator(ctx, cancel, options) - node.UseMessageRouter(newMsgRouter(val)) + node.UseMessageRouter(newMsgRouter(logger, val)) started, err := val.Start(logger) require.NoError(t, err) require.True(t, started) diff --git a/logging/names.go b/logging/names.go index 5a23d12da9..298f6a9ee0 100644 --- a/logging/names.go +++ b/logging/names.go @@ -23,4 +23,5 @@ const ( NamePubsubTrace = "PubsubTrace" NameScoreInspector = "ScoreInspector" NameEventHandler = "EventHandler" + NameDutyFetcher = "DutyFetcher" ) diff --git a/logging/testing.go b/logging/testing.go index 6b6abd8326..b7617c2680 100644 --- a/logging/testing.go +++ b/logging/testing.go @@ -5,16 +5,17 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) func TestLogger(t *testing.T) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", nil) + err := SetGlobalLogger(zapcore.DebugLevel.String(), "capital", "console", nil) require.NoError(t, err) return zap.L().Named(t.Name()) } func BenchLogger(b *testing.B) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", nil) + err := SetGlobalLogger(zapcore.DebugLevel.String(), "capital", "console", nil) require.NoError(b, err) return zap.L().Named(b.Name()) } diff --git a/message/validation/consensus_validation.go 
b/message/validation/consensus_validation.go new file mode 100644 index 0000000000..6bdf023fc4 --- /dev/null +++ b/message/validation/consensus_validation.go @@ -0,0 +1,434 @@ +package validation + +// consensus_validation.go contains methods for validating consensus messages + +import ( + "bytes" + "encoding/hex" + "fmt" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "golang.org/x/exp/slices" + + "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +func (mv *messageValidator) validateConsensusMessage( + share *ssvtypes.SSVShare, + signedMsg *specqbft.SignedMessage, + messageID spectypes.MessageID, + receivedAt time.Time, +) (ConsensusDescriptor, phase0.Slot, error) { + var consensusDescriptor ConsensusDescriptor + + if mv.inCommittee(share) { + mv.metrics.InCommitteeMessage(spectypes.SSVConsensusMsgType, mv.isDecidedMessage(signedMsg)) + } else { + mv.metrics.NonCommitteeMessage(spectypes.SSVConsensusMsgType, mv.isDecidedMessage(signedMsg)) + } + + msgSlot := phase0.Slot(signedMsg.Message.Height) + msgRound := signedMsg.Message.Round + + consensusDescriptor = ConsensusDescriptor{ + QBFTMessageType: signedMsg.Message.MsgType, + Round: msgRound, + Signers: signedMsg.Signers, + Committee: share.Committee, + } + + mv.metrics.ConsensusMsgType(signedMsg.Message.MsgType, len(signedMsg.Signers)) + + if messageID.GetRoleType() == spectypes.BNRoleValidatorRegistration { + return consensusDescriptor, msgSlot, ErrConsensusValidatorRegistration + } + + if err := mv.validateSignatureFormat(signedMsg.Signature); err != nil { + return consensusDescriptor, msgSlot, err + } + + if !mv.validQBFTMsgType(signedMsg.Message.MsgType) { + return consensusDescriptor, msgSlot, ErrUnknownQBFTMessageType + } + + if err := mv.validConsensusSigners(share, signedMsg); err != nil { + return consensusDescriptor, msgSlot, err + } + + role := messageID.GetRoleType() + + if err := mv.validateSlotTime(msgSlot, role, receivedAt); err != nil { + return consensusDescriptor, msgSlot, err + } + + if maxRound := mv.maxRound(role); msgRound > maxRound { + err := ErrRoundTooHigh + err.got = fmt.Sprintf("%v (%v role)", msgRound, role) + err.want = fmt.Sprintf("%v (%v role)", maxRound, role) + return consensusDescriptor, msgSlot, err + } + + slotStartTime := mv.netCfg.Beacon.GetSlotStartTime(msgSlot) /*. 
+ Add(mv.waitAfterSlotStart(role))*/ // TODO: not supported yet because first round is non-deterministic now + + sinceSlotStart := time.Duration(0) + estimatedRound := specqbft.FirstRound + if receivedAt.After(slotStartTime) { + sinceSlotStart = receivedAt.Sub(slotStartTime) + estimatedRound = mv.currentEstimatedRound(sinceSlotStart) + } + + // TODO: lowestAllowed is not supported yet because first round is non-deterministic now + lowestAllowed := /*estimatedRound - allowedRoundsInPast*/ specqbft.FirstRound + highestAllowed := estimatedRound + allowedRoundsInFuture + + if msgRound < lowestAllowed || msgRound > highestAllowed { + err := ErrEstimatedRoundTooFar + err.got = fmt.Sprintf("%v (%v role)", msgRound, role) + err.want = fmt.Sprintf("between %v and %v (%v role) / %v passed", lowestAllowed, highestAllowed, role, sinceSlotStart) + return consensusDescriptor, msgSlot, err + } + + if mv.hasFullData(signedMsg) { + hashedFullData, err := specqbft.HashDataRoot(signedMsg.FullData) + if err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("hash data root: %w", err) + } + + if hashedFullData != signedMsg.Message.Root { + return consensusDescriptor, msgSlot, ErrInvalidHash + } + } + + if err := mv.validateBeaconDuty(messageID.GetRoleType(), msgSlot, share); err != nil { + return consensusDescriptor, msgSlot, err + } + + state := mv.consensusState(messageID) + for _, signer := range signedMsg.Signers { + if err := mv.validateSignerBehaviorConsensus(state, signer, share, messageID, signedMsg); err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("bad signer behavior: %w", err) + } + } + + if mv.verifySignatures { + if err := ssvtypes.VerifyByOperators(signedMsg.Signature, signedMsg, mv.netCfg.Domain, spectypes.QBFTSignatureType, share.Committee); err != nil { + signErr := ErrInvalidSignature + signErr.innerErr = err + signErr.got = fmt.Sprintf("domain %v from %v", hex.EncodeToString(mv.netCfg.Domain[:]), hex.EncodeToString(share.ValidatorPubKey)) + return consensusDescriptor, msgSlot, signErr + } + } + + for _, signer := range signedMsg.Signers { + signerState := state.GetSignerState(signer) + if signerState == nil { + signerState = state.CreateSignerState(signer) + } + if msgSlot > signerState.Slot { + newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) + signerState.ResetSlot(msgSlot, msgRound, newEpoch) + } else if msgSlot == signerState.Slot && msgRound > signerState.Round { + signerState.ResetRound(msgRound) + } + + if mv.hasFullData(signedMsg) && signerState.ProposalData == nil { + signerState.ProposalData = signedMsg.FullData + } + + signerState.MessageCounts.RecordConsensusMessage(signedMsg) + } + + return consensusDescriptor, msgSlot, nil +} + +func (mv *messageValidator) validateJustifications( + share *ssvtypes.SSVShare, + signedMsg *specqbft.SignedMessage, +) error { + pj, err := signedMsg.Message.GetPrepareJustifications() + if err != nil { + e := ErrMalformedPrepareJustifications + e.innerErr = err + return e + } + + if len(pj) != 0 && signedMsg.Message.MsgType != specqbft.ProposalMsgType { + e := ErrUnexpectedPrepareJustifications + e.got = signedMsg.Message.MsgType + return e + } + + rcj, err := signedMsg.Message.GetRoundChangeJustifications() + if err != nil { + e := ErrMalformedRoundChangeJustifications + e.innerErr = err + return e + } + + if len(rcj) != 0 && signedMsg.Message.MsgType != specqbft.ProposalMsgType && signedMsg.Message.MsgType != specqbft.RoundChangeMsgType { + e := 
ErrUnexpectedRoundChangeJustifications + e.got = signedMsg.Message.MsgType + return e + } + + if signedMsg.Message.MsgType == specqbft.ProposalMsgType { + cfg := newQBFTConfig(mv.netCfg.Domain, mv.verifySignatures) + + if err := instance.IsProposalJustification( + cfg, + share, + rcj, + pj, + signedMsg.Message.Height, + signedMsg.Message.Round, + signedMsg.FullData, + ); err != nil { + e := ErrInvalidJustifications + e.innerErr = err + return e + } + } + + return nil +} + +func (mv *messageValidator) validateSignerBehaviorConsensus( + state *ConsensusState, + signer spectypes.OperatorID, + share *ssvtypes.SSVShare, + msgID spectypes.MessageID, + signedMsg *specqbft.SignedMessage, +) error { + signerState := state.GetSignerState(signer) + + if signerState == nil { + return mv.validateJustifications(share, signedMsg) + } + + msgSlot := phase0.Slot(signedMsg.Message.Height) + msgRound := signedMsg.Message.Round + + if msgSlot < signerState.Slot { + // Signers aren't allowed to decrease their slot. + // If they've sent a future message due to clock error, + // this should be caught by the earlyMessage check. + err := ErrSlotAlreadyAdvanced + err.want = signerState.Slot + err.got = msgSlot + return err + } + + if msgSlot == signerState.Slot && msgRound < signerState.Round { + // Signers aren't allowed to decrease their round. + // If they've sent a future message due to clock error, + // they'd have to wait for the next slot/round to be accepted. + err := ErrRoundAlreadyAdvanced + err.want = signerState.Round + err.got = msgRound + return err + } + + newDutyInSameEpoch := false + if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) { + newDutyInSameEpoch = true + } + + if err := mv.validateDutyCount(signerState, msgID, newDutyInSameEpoch); err != nil { + return err + } + + if !(msgSlot > signerState.Slot || msgSlot == signerState.Slot && msgRound > signerState.Round) { + if mv.hasFullData(signedMsg) && signerState.ProposalData != nil && !bytes.Equal(signerState.ProposalData, signedMsg.FullData) { + return ErrDuplicatedProposalWithDifferentData + } + + limits := maxMessageCounts(len(share.Committee)) + if err := signerState.MessageCounts.ValidateConsensusMessage(signedMsg, limits); err != nil { + return err + } + } + + return mv.validateJustifications(share, signedMsg) +} + +func (mv *messageValidator) validateDutyCount( + state *SignerState, + msgID spectypes.MessageID, + newDutyInSameEpoch bool, +) error { + switch msgID.GetRoleType() { + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator, spectypes.BNRoleValidatorRegistration: + limit := maxDutiesPerEpoch + + if sameSlot := !newDutyInSameEpoch; sameSlot { + limit++ + } + + if state.EpochDuties >= limit { + err := ErrTooManyDutiesPerEpoch + err.got = fmt.Sprintf("%v (role %v)", state.EpochDuties, msgID.GetRoleType()) + err.want = fmt.Sprintf("less than %v", maxDutiesPerEpoch) + return err + } + + return nil + } + + return nil +} + +func (mv *messageValidator) validateBeaconDuty( + role spectypes.BeaconRole, + slot phase0.Slot, + share *ssvtypes.SSVShare, +) error { + switch role { + case spectypes.BNRoleProposer: + if share.Metadata.BeaconMetadata == nil { + return ErrNoShareMetadata + } + + epoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(slot) + if mv.dutyStore != nil && mv.dutyStore.Proposer.ValidatorDuty(epoch, slot, share.Metadata.BeaconMetadata.Index) == nil { + return ErrNoDuty + } + + return nil + + case spectypes.BNRoleSyncCommittee, 
spectypes.BNRoleSyncCommitteeContribution: + if share.Metadata.BeaconMetadata == nil { + return ErrNoShareMetadata + } + + period := mv.netCfg.Beacon.EstimatedSyncCommitteePeriodAtEpoch(mv.netCfg.Beacon.EstimatedEpochAtSlot(slot)) + if mv.dutyStore != nil && mv.dutyStore.SyncCommittee.Duty(period, share.Metadata.BeaconMetadata.Index) == nil { + return ErrNoDuty + } + + return nil + } + + return nil +} + +func (mv *messageValidator) hasFullData(signedMsg *specqbft.SignedMessage) bool { + return (signedMsg.Message.MsgType == specqbft.ProposalMsgType || + signedMsg.Message.MsgType == specqbft.RoundChangeMsgType || + mv.isDecidedMessage(signedMsg)) && len(signedMsg.FullData) != 0 // TODO: more complex check of FullData +} + +func (mv *messageValidator) isDecidedMessage(signedMsg *specqbft.SignedMessage) bool { + return signedMsg.Message.MsgType == specqbft.CommitMsgType && len(signedMsg.Signers) > 1 +} + +func (mv *messageValidator) maxRound(role spectypes.BeaconRole) specqbft.Round { + switch role { + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator: // TODO: check if value for aggregator is correct as there are messages on stage exceeding the limit + return 12 // TODO: consider calculating based on quick timeout and slow timeout + case spectypes.BNRoleProposer, spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + return 6 + case spectypes.BNRoleValidatorRegistration: + return 0 + default: + panic("unknown role") + } +} + +func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) specqbft.Round { + if currentQuickRound := specqbft.FirstRound + specqbft.Round(sinceSlotStart/roundtimer.QuickTimeout); currentQuickRound <= roundtimer.QuickTimeoutThreshold { + return currentQuickRound + } + + sinceFirstSlowRound := sinceSlotStart - (time.Duration(roundtimer.QuickTimeoutThreshold) * roundtimer.QuickTimeout) + estimatedRound := roundtimer.QuickTimeoutThreshold + specqbft.FirstRound + specqbft.Round(sinceFirstSlowRound/roundtimer.SlowTimeout) + return estimatedRound +} + +func (mv *messageValidator) waitAfterSlotStart(role spectypes.BeaconRole) time.Duration { + switch role { + case spectypes.BNRoleAttester, spectypes.BNRoleSyncCommittee: + return mv.netCfg.Beacon.SlotDurationSec() / 3 + case spectypes.BNRoleAggregator, spectypes.BNRoleSyncCommitteeContribution: + return mv.netCfg.Beacon.SlotDurationSec() / 3 * 2 + case spectypes.BNRoleProposer, spectypes.BNRoleValidatorRegistration: + return 0 + default: + panic("unknown role") + } +} + +func (mv *messageValidator) validRole(roleType spectypes.BeaconRole) bool { + switch roleType { + case spectypes.BNRoleAttester, + spectypes.BNRoleAggregator, + spectypes.BNRoleProposer, + spectypes.BNRoleSyncCommittee, + spectypes.BNRoleSyncCommitteeContribution, + spectypes.BNRoleValidatorRegistration: + return true + } + return false +} + +func (mv *messageValidator) validQBFTMsgType(msgType specqbft.MessageType) bool { + switch msgType { + case specqbft.ProposalMsgType, specqbft.PrepareMsgType, specqbft.CommitMsgType, specqbft.RoundChangeMsgType: + return true + } + return false +} + +func (mv *messageValidator) validConsensusSigners(share *ssvtypes.SSVShare, m *specqbft.SignedMessage) error { + switch { + case len(m.Signers) == 0: + return ErrNoSigners + + case len(m.Signers) == 1: + if m.Message.MsgType == specqbft.ProposalMsgType { + qbftState := &specqbft.State{ + Height: m.Message.Height, + Share: &share.Share, + } + leader := specqbft.RoundRobinProposer(qbftState, m.Message.Round) + if m.Signers[0] != 
leader { + err := ErrSignerNotLeader + err.got = m.Signers[0] + err.want = leader + return err + } + } + + case m.Message.MsgType != specqbft.CommitMsgType: + e := ErrNonDecidedWithMultipleSigners + e.got = len(m.Signers) + return e + + case !share.HasQuorum(len(m.Signers)) || len(m.Signers) > len(share.Committee): + e := ErrWrongSignersLength + e.want = fmt.Sprintf("between %v and %v", share.Quorum, len(share.Committee)) + e.got = len(m.Signers) + return e + } + + if !slices.IsSorted(m.Signers) { + return ErrSignersNotSorted + } + + var prevSigner spectypes.OperatorID + for _, signer := range m.Signers { + if err := mv.commonSignerValidation(signer, share); err != nil { + return err + } + if signer == prevSigner { + return ErrDuplicatedSigner + } + prevSigner = signer + } + return nil +} diff --git a/message/validation/consensus_validation_test.go b/message/validation/consensus_validation_test.go new file mode 100644 index 0000000000..5f0ae02df1 --- /dev/null +++ b/message/validation/consensus_validation_test.go @@ -0,0 +1,104 @@ +package validation + +import ( + "testing" + "time" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" +) + +func TestMessageValidator_currentEstimatedRound(t *testing.T) { + tt := []struct { + name string + sinceSlotStart time.Duration + want specqbft.Round + }{ + { + name: "0s - expected first round", + sinceSlotStart: 0, + want: specqbft.FirstRound, + }, + { + name: "QuickTimeout/2 - expected first round", + sinceSlotStart: roundtimer.QuickTimeout / 2, + want: specqbft.FirstRound, + }, + { + name: "QuickTimeout - expected first+1 round", + sinceSlotStart: roundtimer.QuickTimeout, + want: specqbft.FirstRound + 1, + }, + { + name: "QuickTimeout*2 - expected first+2 round", + sinceSlotStart: roundtimer.QuickTimeout * 2, + want: specqbft.FirstRound + 2, + }, + { + name: "QuickTimeout*3 - expected first+3 round", + sinceSlotStart: roundtimer.QuickTimeout * 3, + want: specqbft.FirstRound + 3, + }, + { + name: "QuickTimeout*4 - expected first+4 round", + sinceSlotStart: roundtimer.QuickTimeout * 4, + want: specqbft.FirstRound + 4, + }, + { + name: "QuickTimeout*5 - expected first+5 round", + sinceSlotStart: roundtimer.QuickTimeout * 5, + want: specqbft.FirstRound + 5, + }, + { + name: "QuickTimeout*6 - expected first+6 round", + sinceSlotStart: roundtimer.QuickTimeout * 6, + want: specqbft.FirstRound + 6, + }, + { + name: "QuickTimeout*7 - expected first+7 round", + sinceSlotStart: roundtimer.QuickTimeout * 7, + want: specqbft.FirstRound + 7, + }, + { + name: "QuickTimeout*8 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * 8, + want: specqbft.FirstRound + 8, + }, + { + name: "QuickTimeout*9 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+1), + want: roundtimer.QuickTimeoutThreshold + 1, + }, + { + name: "QuickTimeout*10 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+2), + want: roundtimer.QuickTimeoutThreshold + 1, + }, + { + name: "(QuickTimeout*8 + SlowTimeout) - expected first+9 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout, + want: roundtimer.QuickTimeoutThreshold + 2, + }, + { + name: "(QuickTimeout*8 + SlowTimeout*2) - expected first+10 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) 
+ roundtimer.SlowTimeout*2, + want: roundtimer.QuickTimeoutThreshold + 3, + }, + { + name: "(QuickTimeout*8 + SlowTimeout*3) - expected first+11 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*3, + want: roundtimer.QuickTimeoutThreshold + 4, + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + mv := &messageValidator{} + got := mv.currentEstimatedRound(tc.sinceSlotStart) + require.Equal(t, tc.want, got) + }) + } +} diff --git a/message/validation/errors.go b/message/validation/errors.go new file mode 100644 index 0000000000..f27d3b4901 --- /dev/null +++ b/message/validation/errors.go @@ -0,0 +1,100 @@ +package validation + +import ( + "fmt" + "strings" +) + +type Error struct { + text string + got any + want any + innerErr error + reject bool + silent bool +} + +func (e Error) Error() string { + var sb strings.Builder + sb.WriteString(e.text) + + if e.got != nil { + sb.WriteString(fmt.Sprintf(", got %v", e.got)) + } + if e.want != nil { + sb.WriteString(fmt.Sprintf(", want %v", e.want)) + } + if e.innerErr != nil { + sb.WriteString(fmt.Sprintf(": %s", e.innerErr.Error())) + } + + return sb.String() +} + +func (e Error) Reject() bool { + return e.reject +} + +func (e Error) Silent() bool { + return e.silent +} + +func (e Error) Text() string { + return e.text +} + +var ( + ErrEmptyData = Error{text: "empty data"} + ErrWrongDomain = Error{text: "wrong domain"} + ErrNoShareMetadata = Error{text: "share has no metadata"} + ErrUnknownValidator = Error{text: "unknown validator"} + ErrValidatorLiquidated = Error{text: "validator is liquidated"} + ErrValidatorNotAttesting = Error{text: "validator is not attesting"} + ErrSlotAlreadyAdvanced = Error{text: "signer has already advanced to a later slot"} + ErrRoundAlreadyAdvanced = Error{text: "signer has already advanced to a later round"} + ErrRoundTooHigh = Error{text: "round is too high for this role" /*, reject: true*/} // TODO: enable reject + ErrEarlyMessage = Error{text: "early message"} + ErrLateMessage = Error{text: "late message"} + ErrTooManySameTypeMessagesPerRound = Error{text: "too many messages of same type per round"} + ErrPubSubMessageHasNoData = Error{text: "pub-sub message has no data", reject: true} + ErrPubSubDataTooBig = Error{text: "pub-sub message data too big", reject: true} + ErrMalformedPubSubMessage = Error{text: "pub-sub message is malformed", reject: true} + ErrEmptyPubSubMessage = Error{text: "pub-sub message is empty", reject: true} + ErrTopicNotFound = Error{text: "topic not found", reject: true} + ErrSSVDataTooBig = Error{text: "ssv message data too big", reject: true} + ErrInvalidRole = Error{text: "invalid role", reject: true} + ErrConsensusValidatorRegistration = Error{text: "consensus message for validator registration role", reject: true} + ErrNoSigners = Error{text: "no signers", reject: true} + ErrWrongSignatureSize = Error{text: "wrong signature size", reject: true} + ErrZeroSignature = Error{text: "zero signature", reject: true} + ErrZeroSigner = Error{text: "zero signer ID", reject: true} + ErrSignerNotInCommittee = Error{text: "signer is not in committee", reject: true} + ErrDuplicatedSigner = Error{text: "signer is duplicated", reject: true} + ErrSignerNotLeader = Error{text: "signer is not leader", reject: true} + ErrSignersNotSorted = Error{text: "signers are not sorted", reject: true} + ErrUnexpectedSigner = Error{text: "signer is not expected", reject: true} + ErrInvalidHash = Error{text: "root 
doesn't match full data hash", reject: true} + ErrInvalidSignature = Error{text: "invalid signature", reject: true} + ErrInvalidPartialSignature = Error{text: "invalid partial signature", reject: true} + ErrEstimatedRoundTooFar = Error{text: "message round is too far from estimated"} + ErrMalformedMessage = Error{text: "message could not be decoded", reject: true} + ErrUnknownSSVMessageType = Error{text: "unknown SSV message type", reject: true} + ErrUnknownQBFTMessageType = Error{text: "unknown QBFT message type", reject: true} + ErrUnknownPartialMessageType = Error{text: "unknown partial signature message type", reject: true} + ErrPartialSignatureTypeRoleMismatch = Error{text: "partial signature type and role don't match", reject: true} + ErrNonDecidedWithMultipleSigners = Error{text: "non-decided with multiple signers", reject: true} + ErrWrongSignersLength = Error{text: "decided signers size is not between quorum and committee size", reject: true} + ErrDuplicatedProposalWithDifferentData = Error{text: "duplicated proposal with different data", reject: true} + ErrEventMessage = Error{text: "event messages are not broadcast", reject: true} + ErrDKGMessage = Error{text: "DKG messages are not supported", reject: true} + ErrMalformedPrepareJustifications = Error{text: "malformed prepare justifications", reject: true} + ErrUnexpectedPrepareJustifications = Error{text: "prepare justifications unexpected for this message type", reject: true} + ErrMalformedRoundChangeJustifications = Error{text: "malformed round change justifications", reject: true} + ErrUnexpectedRoundChangeJustifications = Error{text: "round change justifications unexpected for this message type", reject: true} + ErrInvalidJustifications = Error{text: "invalid justifications", reject: true} + ErrTooManyDutiesPerEpoch = Error{text: "too many duties per epoch", reject: true} + ErrNoDuty = Error{text: "no duty for this epoch", reject: true} + ErrDeserializePublicKey = Error{text: "deserialize public key", reject: true} + ErrNoPartialMessages = Error{text: "no partial messages", reject: true} + ErrDuplicatedPartialSignatureMessage = Error{text: "duplicated partial signature message", reject: true} +) diff --git a/message/validation/message_counts.go b/message/validation/message_counts.go new file mode 100644 index 0000000000..609ed018bc --- /dev/null +++ b/message/validation/message_counts.go @@ -0,0 +1,156 @@ +package validation + +// message_counts.go contains code for counting and validating messages per validator-slot-round. + +import ( + "fmt" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" +) + +// MessageCounts tracks the number of various message types received for validation. +type MessageCounts struct { + PreConsensus int + Proposal int + Prepare int + Commit int + Decided int + RoundChange int + PostConsensus int +} + +// String provides a formatted representation of the MessageCounts. +func (c *MessageCounts) String() string { + return fmt.Sprintf("pre-consensus: %v, proposal: %v, prepare: %v, commit: %v, decided: %v, round change: %v, post-consensus: %v", + c.PreConsensus, + c.Proposal, + c.Prepare, + c.Commit, + c.Decided, + c.RoundChange, + c.PostConsensus, + ) +} + +// ValidateConsensusMessage checks if the provided consensus message exceeds the set limits. +// Returns an error if the message type exceeds its respective count limit. 
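+//
+// A minimal usage sketch (hypothetical; the validation pipeline below wires
+// this up for real): validate against the committee's limits, then record.
+//
+//	counts := MessageCounts{}
+//	limits := maxMessageCounts(4) // 4-operator committee, illustrative
+//	if err := counts.ValidateConsensusMessage(signedMsg, limits); err != nil {
+//		return err // e.g. ErrTooManySameTypeMessagesPerRound
+//	}
+//	counts.RecordConsensusMessage(signedMsg)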
+func (c *MessageCounts) ValidateConsensusMessage(msg *specqbft.SignedMessage, limits MessageCounts) error { + switch msg.Message.MsgType { + case specqbft.ProposalMsgType: + if c.Proposal >= limits.Proposal { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("proposal, having %v", c.String()) + return err + } + case specqbft.PrepareMsgType: + if c.Prepare >= limits.Prepare { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("prepare, having %v", c.String()) + return err + } + case specqbft.CommitMsgType: + if len(msg.Signers) == 1 { + if c.Commit >= limits.Commit { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("commit, having %v", c.String()) + return err + } + } + if len(msg.Signers) > 1 { + if c.Decided >= limits.Decided { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("decided, having %v", c.String()) + return err + } + } + case specqbft.RoundChangeMsgType: + if c.RoundChange >= limits.RoundChange { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("round change, having %v", c.String()) + return err + } + default: + panic("unexpected signed message type") // should be checked before + } + + return nil +} + +// ValidatePartialSignatureMessage checks if the provided partial signature message exceeds the set limits. +// Returns an error if the message type exceeds its respective count limit. +func (c *MessageCounts) ValidatePartialSignatureMessage(m *spectypes.SignedPartialSignatureMessage, limits MessageCounts) error { + switch m.Message.Type { + case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig: + if c.PreConsensus > limits.PreConsensus { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("pre-consensus, having %v", c.String()) + return err + } + case spectypes.PostConsensusPartialSig: + if c.PostConsensus > limits.PostConsensus { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("post-consensus, having %v", c.String()) + return err + } + default: + panic("unexpected partial signature message type") // should be checked before + } + + return nil +} + +// RecordConsensusMessage updates the counts based on the provided consensus message type. +func (c *MessageCounts) RecordConsensusMessage(msg *specqbft.SignedMessage) { + switch msg.Message.MsgType { + case specqbft.ProposalMsgType: + c.Proposal++ + case specqbft.PrepareMsgType: + c.Prepare++ + case specqbft.CommitMsgType: + switch { + case len(msg.Signers) == 1: + c.Commit++ + case len(msg.Signers) > 1: + c.Decided++ + default: + panic("expected signers") // 0 length should be checked before + } + case specqbft.RoundChangeMsgType: + c.RoundChange++ + default: + panic("unexpected signed message type") // should be checked before + } +} + +// RecordPartialSignatureMessage updates the counts based on the provided partial signature message type. +func (c *MessageCounts) RecordPartialSignatureMessage(msg *spectypes.SignedPartialSignatureMessage) { + switch msg.Message.Type { + case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig: + c.PreConsensus++ + case spectypes.PostConsensusPartialSig: + c.PostConsensus++ + default: + panic("unexpected partial signature message type") // should be checked before + } +} + +// maxMessageCounts is the maximum number of acceptable messages from a signer within a slot & round. 
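+//
+// For example, a 4-operator committee has f = (4-1)/3 = 1, so up to
+// N*(f+1) = 4*2 = 8 decided messages are accepted per slot and round,
+// while every other message type is limited to one.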
+func maxMessageCounts(committeeSize int) MessageCounts { + maxDecided := maxDecidedCount(committeeSize) + + return MessageCounts{ + PreConsensus: 1, + Proposal: 1, + Prepare: 1, + Commit: 1, + Decided: maxDecided, + RoundChange: 1, + PostConsensus: 1, + } +} + +func maxDecidedCount(committeeSize int) int { + f := (committeeSize - 1) / 3 + return committeeSize * (f + 1) // N * (f + 1) +} diff --git a/message/validation/metrics.go b/message/validation/metrics.go new file mode 100644 index 0000000000..f023fe0689 --- /dev/null +++ b/message/validation/metrics.go @@ -0,0 +1,38 @@ +package validation + +import ( + "time" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" +) + +type metrics interface { + MessageAccepted(role spectypes.BeaconRole, round specqbft.Round) + MessageIgnored(reason string, role spectypes.BeaconRole, round specqbft.Round) + MessageRejected(reason string, role spectypes.BeaconRole, round specqbft.Round) + SSVMessageType(msgType spectypes.MsgType) + ConsensusMsgType(msgType specqbft.MessageType, signers int) + MessageValidationDuration(duration time.Duration, labels ...string) + SignatureValidationDuration(duration time.Duration, labels ...string) + MessageSize(size int) + ActiveMsgValidation(topic string) + ActiveMsgValidationDone(topic string) + InCommitteeMessage(msgType spectypes.MsgType, decided bool) + NonCommitteeMessage(msgType spectypes.MsgType, decided bool) +} + +type nopMetrics struct{} + +func (*nopMetrics) ConsensusMsgType(specqbft.MessageType, int) {} +func (*nopMetrics) MessageAccepted(spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) MessageIgnored(string, spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) MessageRejected(string, spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) SSVMessageType(spectypes.MsgType) {} +func (*nopMetrics) MessageValidationDuration(time.Duration, ...string) {} +func (*nopMetrics) SignatureValidationDuration(time.Duration, ...string) {} +func (*nopMetrics) MessageSize(int) {} +func (*nopMetrics) ActiveMsgValidation(string) {} +func (*nopMetrics) ActiveMsgValidationDone(string) {} +func (*nopMetrics) InCommitteeMessage(spectypes.MsgType, bool) {} +func (*nopMetrics) NonCommitteeMessage(spectypes.MsgType, bool) {} diff --git a/message/validation/partial_validation.go b/message/validation/partial_validation.go new file mode 100644 index 0000000000..781267f22d --- /dev/null +++ b/message/validation/partial_validation.go @@ -0,0 +1,251 @@ +package validation + +// partial_validation.go contains methods for validating partial signature messages + +import ( + "encoding/hex" + "fmt" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/herumi/bls-eth-go-binary/bls" + "golang.org/x/exp/slices" + + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +func (mv *messageValidator) validatePartialSignatureMessage( + share *ssvtypes.SSVShare, + signedMsg *spectypes.SignedPartialSignatureMessage, + msgID spectypes.MessageID, +) (phase0.Slot, error) { + if mv.inCommittee(share) { + mv.metrics.InCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) + } else { + mv.metrics.NonCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) + } + + msgSlot := signedMsg.Message.Slot + + if !mv.validPartialSigMsgType(signedMsg.Message.Type) { + e := ErrUnknownPartialMessageType + e.got = signedMsg.Message.Type + return msgSlot, e + } + + role := 
msgID.GetRoleType() + if !mv.partialSignatureTypeMatchesRole(signedMsg.Message.Type, role) { + return msgSlot, ErrPartialSignatureTypeRoleMismatch + } + + if err := mv.validatePartialMessages(share, signedMsg); err != nil { + return msgSlot, err + } + + state := mv.consensusState(msgID) + signerState := state.GetSignerState(signedMsg.Signer) + if signerState != nil { + if err := mv.validateSignerBehaviorPartial(state, signedMsg.Signer, share, msgID, signedMsg); err != nil { + return msgSlot, err + } + } + + if err := mv.validateSignatureFormat(signedMsg.Signature); err != nil { + return msgSlot, err + } + + if mv.verifySignatures { + if err := mv.validPartialSignatures(share, signedMsg); err != nil { + return msgSlot, err + } + } + + if signerState == nil { + signerState = state.CreateSignerState(signedMsg.Signer) + } + + if msgSlot > signerState.Slot { + newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) + signerState.ResetSlot(msgSlot, specqbft.FirstRound, newEpoch) + } + + signerState.MessageCounts.RecordPartialSignatureMessage(signedMsg) + + return msgSlot, nil +} + +func (mv *messageValidator) inCommittee(share *ssvtypes.SSVShare) bool { + return slices.ContainsFunc(share.Committee, func(operator *spectypes.Operator) bool { + return operator.OperatorID == mv.ownOperatorID + }) +} + +func (mv *messageValidator) validPartialSigMsgType(msgType spectypes.PartialSigMsgType) bool { + switch msgType { + case spectypes.PostConsensusPartialSig, + spectypes.RandaoPartialSig, + spectypes.SelectionProofPartialSig, + spectypes.ContributionProofs, + spectypes.ValidatorRegistrationPartialSig: + return true + default: + return false + } +} + +func (mv *messageValidator) partialSignatureTypeMatchesRole(msgType spectypes.PartialSigMsgType, role spectypes.BeaconRole) bool { + switch role { + case spectypes.BNRoleAttester: + return msgType == spectypes.PostConsensusPartialSig + case spectypes.BNRoleAggregator: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.SelectionProofPartialSig + case spectypes.BNRoleProposer: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.RandaoPartialSig + case spectypes.BNRoleSyncCommittee: + return msgType == spectypes.PostConsensusPartialSig + case spectypes.BNRoleSyncCommitteeContribution: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.ContributionProofs + case spectypes.BNRoleValidatorRegistration: + return msgType == spectypes.ValidatorRegistrationPartialSig + default: + panic("invalid role") // role validity should be checked before + } +} + +func (mv *messageValidator) validPartialSignatures(share *ssvtypes.SSVShare, signedMsg *spectypes.SignedPartialSignatureMessage) error { + if err := ssvtypes.VerifyByOperators(signedMsg.Signature, signedMsg, mv.netCfg.Domain, spectypes.PartialSignatureType, share.Committee); err != nil { + signErr := ErrInvalidSignature + signErr.innerErr = err + signErr.got = fmt.Sprintf("domain %v from %v", hex.EncodeToString(mv.netCfg.Domain[:]), hex.EncodeToString(share.ValidatorPubKey)) + return signErr + } + + for _, message := range signedMsg.Message.Messages { + if err := mv.verifyPartialSignature(message, share); err != nil { + return err + } + } + + return nil +} + +func (mv *messageValidator) verifyPartialSignature(msg *spectypes.PartialSignatureMessage, share *ssvtypes.SSVShare) error { + signer := msg.Signer + signature := msg.PartialSignature + root := msg.SigningRoot + + for _, n := 
range share.Committee { + if n.GetID() != signer { + continue + } + + pk, err := ssvtypes.DeserializeBLSPublicKey(n.GetPublicKey()) + if err != nil { + return fmt.Errorf("deserialize pk: %w", err) + } + sig := &bls.Sign{} + if err := sig.Deserialize(signature); err != nil { + return fmt.Errorf("deserialize signature: %w", err) + } + + if !mv.aggregateVerify(sig, pk, root) { + return ErrInvalidPartialSignature + } + + return nil + } + + return ErrSignerNotInCommittee +} + +func (mv *messageValidator) aggregateVerify(sig *bls.Sign, pk bls.PublicKey, root [32]byte) bool { + start := time.Now() + + valid := sig.FastAggregateVerify([]bls.PublicKey{pk}, root[:]) + + sinceStart := time.Since(start) + mv.metrics.SignatureValidationDuration(sinceStart) + + return valid +} + +func (mv *messageValidator) validatePartialMessages(share *ssvtypes.SSVShare, m *spectypes.SignedPartialSignatureMessage) error { + if err := mv.commonSignerValidation(m.Signer, share); err != nil { + return err + } + + if len(m.Message.Messages) == 0 { + return ErrNoPartialMessages + } + + seen := map[[32]byte]struct{}{} + for _, message := range m.Message.Messages { + if _, ok := seen[message.SigningRoot]; ok { + return ErrDuplicatedPartialSignatureMessage + } + seen[message.SigningRoot] = struct{}{} + + if message.Signer != m.Signer { + err := ErrUnexpectedSigner + err.want = m.Signer + err.got = message.Signer + return err + } + + if err := mv.commonSignerValidation(message.Signer, share); err != nil { + return err + } + + if err := mv.validateSignatureFormat(message.PartialSignature); err != nil { + return err + } + } + + return nil +} + +func (mv *messageValidator) validateSignerBehaviorPartial( + state *ConsensusState, + signer spectypes.OperatorID, + share *ssvtypes.SSVShare, + msgID spectypes.MessageID, + signedMsg *spectypes.SignedPartialSignatureMessage, +) error { + signerState := state.GetSignerState(signer) + + if signerState == nil { + return nil + } + + msgSlot := signedMsg.Message.Slot + + if msgSlot < signerState.Slot { + // Signers aren't allowed to decrease their slot. + // If they've sent a future message due to clock error, + // this should be caught by the earlyMessage check. 
+			err := ErrSlotAlreadyAdvanced
+			err.want = signerState.Slot
+			err.got = msgSlot
+			return err
+	}
+
+	newDutyInSameEpoch := false
+	if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) {
+		newDutyInSameEpoch = true
+	}
+
+	if err := mv.validateDutyCount(signerState, msgID, newDutyInSameEpoch); err != nil {
+		return err
+	}
+
+	if msgSlot <= signerState.Slot {
+		limits := maxMessageCounts(len(share.Committee))
+		if err := signerState.MessageCounts.ValidatePartialSignatureMessage(signedMsg, limits); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/message/validation/qbft_config.go b/message/validation/qbft_config.go
new file mode 100644
index 0000000000..fe5ed6dc04
--- /dev/null
+++ b/message/validation/qbft_config.go
@@ -0,0 +1,53 @@
+package validation
+
+import (
+	specqbft "github.com/bloxapp/ssv-spec/qbft"
+	spectypes "github.com/bloxapp/ssv-spec/types"
+
+	"github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer"
+	qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage"
+)
+
+type qbftConfig struct {
+	domain          spectypes.DomainType
+	verifySignature bool
+}
+
+func newQBFTConfig(domain spectypes.DomainType, verifySignature bool) qbftConfig {
+	return qbftConfig{
+		domain:          domain,
+		verifySignature: verifySignature,
+	}
+}
+
+func (q qbftConfig) GetSigner() spectypes.SSVSigner {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetSignatureDomainType() spectypes.DomainType {
+	return q.domain
+}
+
+func (q qbftConfig) GetValueCheckF() specqbft.ProposedValueCheckF {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetProposerF() specqbft.ProposerF {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetNetwork() specqbft.Network {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetStorage() qbftstorage.QBFTStore {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetTimer() roundtimer.Timer {
+	panic("should not be called")
+}
+
+func (q qbftConfig) VerifySignatures() bool {
+	return q.verifySignature
+}
diff --git a/message/validation/signer_state.go b/message/validation/signer_state.go
new file mode 100644
index 0000000000..dc9bf1818e
--- /dev/null
+++ b/message/validation/signer_state.go
@@ -0,0 +1,45 @@
+package validation
+
+// signer_state.go describes the state of a signer.
+
+import (
+	"time"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	specqbft "github.com/bloxapp/ssv-spec/qbft"
+)
+
+// SignerState represents the state of a signer, including its start time, slot, round,
+// message counts, proposal data, and the number of duties performed in the current epoch.
+type SignerState struct {
+	Start         time.Time
+	Slot          phase0.Slot
+	Round         specqbft.Round
+	MessageCounts MessageCounts
+	ProposalData  []byte
+	EpochDuties   int
+}
+
+// ResetSlot resets the state's slot and round to the given values and clears the message counts and proposal data.
+// It also updates the start time to the current time, and either resets the epoch duty count to 1 (on a new epoch) or increments it.
+func (s *SignerState) ResetSlot(slot phase0.Slot, round specqbft.Round, newEpoch bool) {
+	s.Start = time.Now()
+	s.Slot = slot
+	s.Round = round
+	s.MessageCounts = MessageCounts{}
+	s.ProposalData = nil
+	if newEpoch {
+		s.EpochDuties = 1
+	} else {
+		s.EpochDuties++
+	}
+}
+
+// ResetRound resets the state's round to the given value and clears the message counts and proposal data.
+// It also updates the start time to the current time.
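+//
+// For example (illustrative), when a signer advances within the same slot:
+//
+//	state.ResetRound(specqbft.Round(2)) // counts and proposal data start fresh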
+func (s *SignerState) ResetRound(round specqbft.Round) { + s.Start = time.Now() + s.Round = round + s.MessageCounts = MessageCounts{} + s.ProposalData = nil +} diff --git a/message/validation/validation.go b/message/validation/validation.go new file mode 100644 index 0000000000..98e100fa3c --- /dev/null +++ b/message/validation/validation.go @@ -0,0 +1,556 @@ +// Package validation provides functions and structures for validating messages. +package validation + +// validator.go contains main code for validation and most of the rule checks. + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "strings" + "sync" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/cornelk/hashmap" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/exp/slices" + + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + registrystorage "github.com/bloxapp/ssv/registry/storage" +) + +const ( + // lateMessageMargin is the duration past a message's TTL in which it is still considered valid. + lateMessageMargin = time.Second * 3 + + // clockErrorTolerance is the maximum amount of clock error we expect to see between nodes. + clockErrorTolerance = time.Millisecond * 50 + + maxMessageSize = maxConsensusMsgSize + maxConsensusMsgSize = 8388608 + maxPartialSignatureMsgSize = 1952 + allowedRoundsInFuture = 1 + allowedRoundsInPast = 2 + lateSlotAllowance = 2 + signatureSize = 96 + maxDutiesPerEpoch = 2 +) + +// ConsensusID uniquely identifies a public key and role pair to keep track of state. +type ConsensusID struct { + PubKey phase0.BLSPubKey + Role spectypes.BeaconRole +} + +// ConsensusState keeps track of the signers for a given public key and role. +type ConsensusState struct { + // TODO: consider evicting old data to avoid excessive memory consumption + Signers *hashmap.Map[spectypes.OperatorID, *SignerState] +} + +// GetSignerState retrieves the state for the given signer. +// Returns nil if the signer is not found. +func (cs *ConsensusState) GetSignerState(signer spectypes.OperatorID) *SignerState { + signerState, _ := cs.Signers.Get(signer) + return signerState +} + +// CreateSignerState initializes and sets a new SignerState for the given signer. +func (cs *ConsensusState) CreateSignerState(signer spectypes.OperatorID) *SignerState { + signerState := &SignerState{} + cs.Signers.Set(signer, signerState) + + return signerState +} + +// PubsubMessageValidator defines methods for validating pubsub messages. +type PubsubMessageValidator interface { + ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult + ValidatePubsubMessage(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult +} + +// SSVMessageValidator defines methods for validating SSV messages. +type SSVMessageValidator interface { + ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error) +} + +// MessageValidator is an interface that combines both PubsubMessageValidator and SSVMessageValidator. 
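+//
+// A typical construction (a sketch; the option values are assumptions, the
+// options themselves are defined below):
+//
+//	mv := NewMessageValidator(
+//		networkconfig.TestNetwork,
+//		WithLogger(logger),
+//		WithShareStorage(shares),
+//		WithSignatureVerification(true),
+//	)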
+type MessageValidator interface { + PubsubMessageValidator + SSVMessageValidator +} + +type messageValidator struct { + logger *zap.Logger + metrics metrics + netCfg networkconfig.NetworkConfig + index sync.Map + shareStorage registrystorage.Shares + dutyStore *dutystore.Store + ownOperatorID spectypes.OperatorID + verifySignatures bool +} + +// NewMessageValidator returns a new MessageValidator with the given network configuration and options. +func NewMessageValidator(netCfg networkconfig.NetworkConfig, opts ...Option) MessageValidator { + mv := &messageValidator{ + logger: zap.NewNop(), + metrics: &nopMetrics{}, + netCfg: netCfg, + } + + for _, opt := range opts { + opt(mv) + } + + return mv +} + +// Option represents a functional option for configuring a messageValidator. +type Option func(validator *messageValidator) + +// WithLogger sets the logger for the messageValidator. +func WithLogger(logger *zap.Logger) Option { + return func(mv *messageValidator) { + mv.logger = logger + } +} + +// WithMetrics sets the metrics for the messageValidator. +func WithMetrics(metrics metrics) Option { + return func(mv *messageValidator) { + mv.metrics = metrics + } +} + +// WithDutyStore sets the duty store for the messageValidator. +func WithDutyStore(dutyStore *dutystore.Store) Option { + return func(mv *messageValidator) { + mv.dutyStore = dutyStore + } +} + +// WithOwnOperatorID sets the operator ID for the messageValidator. +func WithOwnOperatorID(id spectypes.OperatorID) Option { + return func(mv *messageValidator) { + mv.ownOperatorID = id + } +} + +// WithShareStorage sets the share storage for the messageValidator. +func WithShareStorage(shareStorage registrystorage.Shares) Option { + return func(mv *messageValidator) { + mv.shareStorage = shareStorage + } +} + +// WithSignatureVerification sets whether to verify signatures in the messageValidator. +func WithSignatureVerification(check bool) Option { + return func(mv *messageValidator) { + mv.verifySignatures = check + } +} + +// ConsensusDescriptor provides details about the consensus for a message. It's used for logging and metrics. +type ConsensusDescriptor struct { + Round specqbft.Round + QBFTMessageType specqbft.MessageType + Signers []spectypes.OperatorID + Committee []*spectypes.Operator +} + +// Descriptor provides details about a message. It's used for logging and metrics. +type Descriptor struct { + ValidatorPK spectypes.ValidatorPK + Role spectypes.BeaconRole + SSVMessageType spectypes.MsgType + Slot phase0.Slot + Consensus *ConsensusDescriptor +} + +// Fields returns zap logging fields for the descriptor. +func (d Descriptor) Fields() []zapcore.Field { + result := []zapcore.Field{ + fields.Validator(d.ValidatorPK), + fields.Role(d.Role), + zap.String("ssv_message_type", ssvmessage.MsgTypeToString(d.SSVMessageType)), + fields.Slot(d.Slot), + } + + if d.Consensus != nil { + var committee []spectypes.OperatorID + for _, o := range d.Consensus.Committee { + committee = append(committee, o.OperatorID) + } + + result = append(result, + fields.Round(d.Consensus.Round), + zap.String("qbft_message_type", ssvmessage.QBFTMsgTypeToString(d.Consensus.QBFTMessageType)), + zap.Uint64s("signers", d.Consensus.Signers), + zap.Uint64s("committee", committee), + ) + } + + return result +} + +// String provides a string representation of the descriptor. It may be useful for logging. 
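+//
+// Hypothetical usage alongside Fields, which is generally preferable for
+// structured logging:
+//
+//	logger.Debug("validated message", descriptor.Fields()...)
+//	fmt.Println(descriptor.String())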
+func (d Descriptor) String() string { + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("validator PK: %v, role: %v, ssv message type: %v, slot: %v", + hex.EncodeToString(d.ValidatorPK), + d.Role.String(), + ssvmessage.MsgTypeToString(d.SSVMessageType), + d.Slot, + )) + + if d.Consensus != nil { + var committee []spectypes.OperatorID + for _, o := range d.Consensus.Committee { + committee = append(committee, o.OperatorID) + } + + sb.WriteString(fmt.Sprintf(", round: %v, qbft message type: %v, signers: %v, committee: %v", + d.Consensus.Round, + ssvmessage.QBFTMsgTypeToString(d.Consensus.QBFTMessageType), + d.Consensus.Signers, + committee, + )) + } + + return sb.String() +} + +// ValidatorForTopic returns a validation function for the given topic. +// This function can be used to validate messages within the libp2p pubsub framework. +func (mv *messageValidator) ValidatorForTopic(_ string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + return mv.ValidatePubsubMessage +} + +// ValidatePubsubMessage validates the given pubsub message. +// Depending on the outcome, it will return one of the pubsub validation results (Accept, Ignore, or Reject). +func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, _ peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + start := time.Now() + var validationDurationLabels []string // TODO: implement + + defer func() { + sinceStart := time.Since(start) + mv.metrics.MessageValidationDuration(sinceStart, validationDurationLabels...) + }() + + decodedMessage, descriptor, err := mv.validateP2PMessage(pmsg, time.Now()) + round := specqbft.Round(0) + if descriptor.Consensus != nil { + round = descriptor.Consensus.Round + } + + if err != nil { + var valErr Error + if errors.As(err, &valErr) { + if valErr.Reject() { + if !valErr.Silent() { + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("rejecting invalid message", f...) + } + + mv.metrics.MessageRejected(valErr.Text(), descriptor.Role, round) + return pubsub.ValidationReject + } + + if !valErr.Silent() { + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("ignoring invalid message", f...) + } + mv.metrics.MessageIgnored(valErr.Text(), descriptor.Role, round) + return pubsub.ValidationIgnore + } + + mv.metrics.MessageIgnored(err.Error(), descriptor.Role, round) + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("ignoring invalid message", f...) + return pubsub.ValidationIgnore + } + + pmsg.ValidatorData = decodedMessage + + mv.metrics.MessageAccepted(descriptor.Role, round) + + return pubsub.ValidationAccept +} + +// ValidateSSVMessage validates the given SSV message. +// If successful, it returns the decoded message and its descriptor. Otherwise, it returns an error. 
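+//
+// A hedged usage sketch (error handling is illustrative):
+//
+//	decoded, descriptor, err := mv.ValidateSSVMessage(ssvMessage)
+//	if err != nil {
+//		logger.Debug("dropping message", append(descriptor.Fields(), zap.Error(err))...)
+//		return
+//	}
+//	_ = decoded // hand off to the message queue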
+func (mv *messageValidator) ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error) { + return mv.validateSSVMessage(ssvMessage, time.Now()) +} + +func (mv *messageValidator) validateP2PMessage(pMsg *pubsub.Message, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { + topic := pMsg.GetTopic() + + mv.metrics.ActiveMsgValidation(topic) + defer mv.metrics.ActiveMsgValidationDone(topic) + + messageData := pMsg.GetData() + if len(messageData) == 0 { + return nil, Descriptor{}, ErrPubSubMessageHasNoData + } + + mv.metrics.MessageSize(len(messageData)) + + // Max possible MsgType + MsgID + Data plus 10% for encoding overhead + const maxMsgSize = 4 + 56 + 8388668 + const maxEncodedMsgSize = maxMsgSize + maxMsgSize/10 + if len(messageData) > maxEncodedMsgSize { + e := ErrPubSubDataTooBig + e.got = len(messageData) + return nil, Descriptor{}, e + } + + msg, err := commons.DecodeNetworkMsg(messageData) + if err != nil { + e := ErrMalformedPubSubMessage + e.innerErr = err + return nil, Descriptor{}, e + } + + if msg == nil { + return nil, Descriptor{}, ErrEmptyPubSubMessage + } + + // Check if the message was sent on the right topic. + currentTopic := pMsg.GetTopic() + currentTopicBaseName := commons.GetTopicBaseName(currentTopic) + topics := commons.ValidatorTopicID(msg.GetID().GetPubKey()) + + topicFound := false + for _, tp := range topics { + if tp == currentTopicBaseName { + topicFound = true + break + } + } + if !topicFound { + return nil, Descriptor{}, ErrTopicNotFound + } + + mv.metrics.SSVMessageType(msg.MsgType) + + return mv.validateSSVMessage(msg, receivedAt) +} + +func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { + var descriptor Descriptor + + if len(ssvMessage.Data) == 0 { + return nil, descriptor, ErrEmptyData + } + + if len(ssvMessage.Data) > maxMessageSize { + err := ErrSSVDataTooBig + err.got = len(ssvMessage.Data) + err.want = maxMessageSize + return nil, descriptor, err + } + + if !bytes.Equal(ssvMessage.MsgID.GetDomain(), mv.netCfg.Domain[:]) { + err := ErrWrongDomain + err.got = hex.EncodeToString(ssvMessage.MsgID.GetDomain()) + err.want = hex.EncodeToString(mv.netCfg.Domain[:]) + return nil, descriptor, err + } + + validatorPK := ssvMessage.GetID().GetPubKey() + role := ssvMessage.GetID().GetRoleType() + descriptor.Role = role + descriptor.ValidatorPK = validatorPK + + if !mv.validRole(role) { + return nil, descriptor, ErrInvalidRole + } + + publicKey, err := ssvtypes.DeserializeBLSPublicKey(validatorPK) + if err != nil { + e := ErrDeserializePublicKey + e.innerErr = err + return nil, descriptor, e + } + + var share *ssvtypes.SSVShare + if mv.shareStorage != nil { + share = mv.shareStorage.Get(nil, publicKey.Serialize()) + if share == nil { + e := ErrUnknownValidator + e.got = publicKey.SerializeToHexStr() + return nil, descriptor, e + } + + if share.Liquidated { + return nil, descriptor, ErrValidatorLiquidated + } + + if share.BeaconMetadata == nil { + return nil, descriptor, ErrNoShareMetadata + } + + if !share.BeaconMetadata.IsAttesting() { + err := ErrValidatorNotAttesting + err.got = share.BeaconMetadata.Status.String() + return nil, descriptor, err + } + } + + msg, err := queue.DecodeSSVMessage(ssvMessage) + if err != nil { + if errors.Is(err, queue.ErrUnknownMessageType) { + e := ErrUnknownSSVMessageType + e.got = ssvMessage.GetType() + return nil, descriptor, e + } + + e := ErrMalformedMessage + e.innerErr = 
err + return nil, descriptor, e + } + + descriptor.SSVMessageType = ssvMessage.MsgType + + if mv.shareStorage != nil { + switch ssvMessage.MsgType { + case spectypes.SSVConsensusMsgType: + if len(msg.Data) > maxConsensusMsgSize { + e := ErrSSVDataTooBig + e.got = len(ssvMessage.Data) + e.want = maxConsensusMsgSize + return nil, descriptor, e + } + + consensusDescriptor, slot, err := mv.validateConsensusMessage(share, msg.Body.(*specqbft.SignedMessage), msg.GetID(), receivedAt) + descriptor.Consensus = &consensusDescriptor + descriptor.Slot = slot + if err != nil { + return nil, descriptor, err + } + + case spectypes.SSVPartialSignatureMsgType: + if len(msg.Data) > maxPartialSignatureMsgSize { + e := ErrSSVDataTooBig + e.got = len(ssvMessage.Data) + e.want = maxPartialSignatureMsgSize + return nil, descriptor, e + } + + slot, err := mv.validatePartialSignatureMessage(share, msg.Body.(*spectypes.SignedPartialSignatureMessage), msg.GetID()) + descriptor.Slot = slot + if err != nil { + return nil, descriptor, err + } + + case ssvmessage.SSVEventMsgType: + return nil, descriptor, ErrEventMessage + + case spectypes.DKGMsgType: + return nil, descriptor, ErrDKGMessage + } + } + + return msg, descriptor, nil +} + +func (mv *messageValidator) containsSignerFunc(signer spectypes.OperatorID) func(operator *spectypes.Operator) bool { + return func(operator *spectypes.Operator) bool { + return operator.OperatorID == signer + } +} + +func (mv *messageValidator) validateSignatureFormat(signature []byte) error { + if len(signature) != signatureSize { + e := ErrWrongSignatureSize + e.got = len(signature) + return e + } + + if [signatureSize]byte(signature) == [signatureSize]byte{} { + return ErrZeroSignature + } + return nil +} + +func (mv *messageValidator) commonSignerValidation(signer spectypes.OperatorID, share *ssvtypes.SSVShare) error { + if signer == 0 { + return ErrZeroSigner + } + + if !slices.ContainsFunc(share.Committee, mv.containsSignerFunc(signer)) { + return ErrSignerNotInCommittee + } + + return nil +} + +func (mv *messageValidator) validateSlotTime(messageSlot phase0.Slot, role spectypes.BeaconRole, receivedAt time.Time) error { + if mv.earlyMessage(messageSlot, receivedAt) { + return ErrEarlyMessage + } + + if lateness := mv.lateMessage(messageSlot, role, receivedAt); lateness > 0 { + e := ErrLateMessage + e.got = fmt.Sprintf("late by %v", lateness) + return e + } + + return nil +} + +func (mv *messageValidator) earlyMessage(slot phase0.Slot, receivedAt time.Time) bool { + return mv.netCfg.Beacon.GetSlotEndTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())). + Add(-clockErrorTolerance).Before(mv.netCfg.Beacon.GetSlotStartTime(slot)) +} + +func (mv *messageValidator) lateMessage(slot phase0.Slot, role spectypes.BeaconRole, receivedAt time.Time) time.Duration { + var ttl phase0.Slot + switch role { + case spectypes.BNRoleProposer, spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + ttl = 1 + lateSlotAllowance + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator: + ttl = 32 + lateSlotAllowance + case spectypes.BNRoleValidatorRegistration: + return 0 + } + + deadline := mv.netCfg.Beacon.GetSlotStartTime(slot + ttl). + Add(lateMessageMargin).Add(clockErrorTolerance) + + return mv.netCfg.Beacon.GetSlotStartTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())). 
+ Sub(deadline) +} + +func (mv *messageValidator) consensusState(messageID spectypes.MessageID) *ConsensusState { + id := ConsensusID{ + PubKey: phase0.BLSPubKey(messageID.GetPubKey()), + Role: messageID.GetRoleType(), + } + + if _, ok := mv.index.Load(id); !ok { + cs := &ConsensusState{ + Signers: hashmap.New[spectypes.OperatorID, *SignerState](), + } + mv.index.Store(id, cs) + } + + cs, _ := mv.index.Load(id) + return cs.(*ConsensusState) +} diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go new file mode 100644 index 0000000000..b307e05049 --- /dev/null +++ b/message/validation/validation_test.go @@ -0,0 +1,1774 @@ +package validation + +import ( + "bytes" + "encoding/hex" + "math" + "testing" + "time" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/herumi/bls-eth-go-binary/bls" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pspb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/stretchr/testify/require" + eth2types "github.com/wealdtech/go-eth2-types/v2" + "go.uber.org/zap/zaptest" + + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/storage" + beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" +) + +func Test_ValidateSSVMessage(t *testing.T) { + logger := zaptest.NewLogger(t) + db, err := kv.NewInMemory(logger, basedb.Options{}) + require.NoError(t, err) + + ns, err := storage.NewNodeStorage(logger, db) + require.NoError(t, err) + + const validatorIndex = 123 + + ks := spectestingutils.Testing4SharesSet() + share := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + Index: validatorIndex, + }, + Liquidated: false, + }, + } + require.NoError(t, ns.Shares().Save(nil, share)) + + netCfg := networkconfig.TestNetwork + + roleAttester := spectypes.BNRoleAttester + + // Message validation happy flow, messages are not ignored or rejected and there are no errors + t.Run("happy flow", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + }) + + // Make sure messages are incremented and throw an ignore message if more than 1 for a commit + t.Run("message counts", func(t *testing.T) { + validator 
:= NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + state := validator.consensusState(msgID) + for i := spectypes.OperatorID(1); i <= 4; i++ { + signerState := state.GetSignerState(i) + require.Nil(t, signerState) + } + + signedMsg := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedMsg, err := signedMsg.Encode() + require.NoError(t, err) + + ssvMsg := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMsg, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.NoError(t, err) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + state1 := state.GetSignerState(1) + require.NotNil(t, state1) + require.EqualValues(t, height, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Proposal: 1}, state1.MessageCounts) + for i := spectypes.OperatorID(2); i <= 4; i++ { + signerState := state.GetSignerState(i) + require.Nil(t, signerState) + } + + signedMsg = spectestingutils.TestingPrepareMessageWithParams(ks.Shares[1], 1, 2, height, spectestingutils.TestingIdentifier, spectestingutils.TestingQBFTRootData) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.NoError(t, err) + + require.NotNil(t, state1) + require.EqualValues(t, height, state1.Slot) + require.EqualValues(t, 2, state1.Round) + require.EqualValues(t, MessageCounts{Prepare: 1}, state1.MessageCounts) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + signedMsg = spectestingutils.TestingCommitMessageWithHeight(ks.Shares[1], 1, height+1) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.NoError(t, err) + require.NotNil(t, state1) + require.EqualValues(t, height+1, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Commit: 1}, state1.MessageCounts) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + signedMsg = spectestingutils.TestingCommitMultiSignerMessageWithHeight([]*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, height+1) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.NoError(t, err) + require.NotNil(t, state1) + require.EqualValues(t, height+1, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Commit: 1, Decided: 1}, state1.MessageCounts) + }) + + // Send a pubsub message with no data should cause an error + t.Run("pubsub message has no data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, 
WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + pmsg := &pubsub.Message{} + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err := validator.validateP2PMessage(pmsg, receivedAt) + + require.ErrorIs(t, err, ErrPubSubMessageHasNoData) + }) + + // Send a pubsub message where there is too much data should cause an error + t.Run("pubsub data too big", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + topic := commons.GetTopicFullName(commons.ValidatorTopicID(share.ValidatorPubKey)[0]) + pmsg := &pubsub.Message{ + Message: &pspb.Message{ + Data: bytes.Repeat([]byte{1}, 10_000_000), + Topic: &topic, + From: []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r"), + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) + + e := ErrPubSubDataTooBig + e.got = 10_000_000 + require.ErrorIs(t, err, e) + }) + + // Send a malformed pubsub message (empty message) should return an error + t.Run("empty pubsub message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + topic := commons.GetTopicFullName(commons.ValidatorTopicID(share.ValidatorPubKey)[0]) + pmsg := &pubsub.Message{ + Message: &pspb.Message{ + Data: []byte{1}, + Topic: &topic, + From: []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r"), + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) + + require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) + }) + + // Send a message with incorrect data (unable to decode incorrect message type) + t.Run("bad data format", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{1}, 500), + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Send a message with no data should return an error + t.Run("no data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: []byte{}, + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + require.ErrorIs(t, err, ErrEmptyData) + + message = &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: nil, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorIs(t, err, 
ErrEmptyData) + }) + + // Send a message where there is too much data should cause an error + t.Run("data too big", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + const tooBigMsgSize = maxMessageSize * 2 + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{0x1}, tooBigMsgSize), + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrSSVDataTooBig + expectedErr.got = tooBigMsgSize + expectedErr.want = maxMessageSize + require.ErrorIs(t, err, expectedErr) + }) + + // Send exact allowed data size amount but with invalid data (fails to decode) + t.Run("data size borderline / malformed message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{0x1}, maxMessageSize), + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Send an invalid SSV message type returns an error + t.Run("invalid SSV message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: math.MaxUint64, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: []byte{0x1}, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrUnknownSSVMessageType.Error()) + }) + + // Empty validator public key returns an error + t.Run("empty validator public key", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, spectypes.ValidatorPK{}, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrDeserializePublicKey.Error()) + }) + + // Generate random validator and validate it is unknown to the network + t.Run("unknown validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + sk, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, sk.PublicKey().Marshal(), roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrUnknownValidator + expectedErr.got = hex.EncodeToString(sk.PublicKey().Marshal()) + 
require.ErrorIs(t, err, expectedErr) + }) + + // Make sure messages are dropped if on the incorrect network + t.Run("wrong domain", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + wrongDomain := spectypes.DomainType{math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8} + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(wrongDomain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrWrongDomain + expectedErr.got = hex.EncodeToString(wrongDomain[:]) + expectedErr.want = hex.EncodeToString(netCfg.Domain[:]) + require.ErrorIs(t, err, expectedErr) + }) + + // Send message with a value that refers to a non-existent role + t.Run("invalid role", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, math.MaxUint64), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrInvalidRole) + }) + + // Perform validator registration with a consensus type message will give an error + t.Run("consensus validator registration", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, spectypes.BNRoleValidatorRegistration), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrConsensusValidatorRegistration) + }) + + // Ignore messages related to a validator that is liquidated + t.Run("liquidated validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + liquidatedSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + liquidatedShare := &ssvtypes.SSVShare{ + Share: 
*spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + }, + Liquidated: true, + }, + } + liquidatedShare.ValidatorPubKey = liquidatedSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, liquidatedShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, liquidatedShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrValidatorLiquidated + require.ErrorIs(t, err, expectedErr) + + require.NoError(t, ns.Shares().Delete(nil, liquidatedShare.ValidatorPubKey)) + }) + + // Ignore messages related to a validator that is not active + t.Run("inactive validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + inactiveSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + inactiveShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateUnknown, + }, + Liquidated: false, + }, + } + inactiveShare.ValidatorPubKey = inactiveSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, inactiveShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, inactiveShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrValidatorNotAttesting + expectedErr.got = eth2apiv1.ValidatorStateUnknown.String() + require.ErrorIs(t, err, expectedErr) + + require.NoError(t, ns.Shares().Delete(nil, inactiveShare.ValidatorPubKey)) + }) + + // Unable to process a message with a validator that is not on the network + t.Run("no share metadata", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + noMetadataSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + noMetadataShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: nil, + Liquidated: false, + }, + } + noMetadataShare.ValidatorPubKey = noMetadataSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, noMetadataShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, noMetadataShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + 
receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrNoShareMetadata) + + require.NoError(t, ns.Shares().Delete(nil, noMetadataShare.ValidatorPubKey)) + }) + + // Receive an error if there are more than 2 attestation duties in an epoch + t.Run("too many duties", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + require.NoError(t, err) + + validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+4) + encodedValidSignedMessage, err = validSignedMessage.Encode() + require.NoError(t, err) + + message.Data = encodedValidSignedMessage + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+4).Add(validator.waitAfterSlotStart(roleAttester))) + require.NoError(t, err) + + validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+8) + encodedValidSignedMessage, err = validSignedMessage.Encode() + require.NoError(t, err) + + message.Data = encodedValidSignedMessage + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+8).Add(validator.waitAfterSlotStart(roleAttester))) + require.ErrorContains(t, err, ErrTooManyDutiesPerEpoch.Error()) + }) + + // Receive an error when a proposer-role message arrives for a validator that has no proposer duty from the beacon node + t.Run("no proposal duties", func(t *testing.T) { + const epoch = 1 + slot := netCfg.Beacon.FirstSlotAtEpoch(epoch) + height := specqbft.Height(slot) + + dutyStore := dutystore.New() + dutyStore.Proposer.Add(epoch, slot, validatorIndex+1, &eth2apiv1.ProposerDuty{}, true) + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithDutyStore(dutyStore), WithSignatureVerification(true)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, spectypes.BNRoleProposer), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer))) + require.ErrorContains(t, err, ErrNoDuty.Error()) + + dutyStore = dutystore.New() + dutyStore.Proposer.Add(epoch, slot, validatorIndex, &eth2apiv1.ProposerDuty{}, true) + validator = NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithDutyStore(dutyStore), WithSignatureVerification(true)).(*messageValidator) + _, _, err = validator.validateSSVMessage(message, 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer))) + require.NoError(t, err) + }) + + // Get error when receiving a message with over 13 partial signatures + t.Run("partial message too big", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + for i := 0; i < 13; i++ { + msg.Message.Messages = append(msg.Message.Messages, msg.Message.Messages[0]) + } + + _, err := msg.Encode() + require.ErrorContains(t, err, "max expected 13 and 14 found") + }) + + // Get error when receiving message from operator who is not affiliated with the validator + t.Run("signer ID not in committee", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 5, specqbft.Height(slot)) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrSignerNotInCommittee) + }) + + // Get error when receiving message from operator who is non-existent (operator id 0) + t.Run("partial zero signer ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 0, specqbft.Height(slot)) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + // Get error when receiving partial signature message from operator who is the incorrect signer + t.Run("partial inconsistent signer ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages[0].Signer = 2 + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrUnexpectedSigner + expectedErr.got = spectypes.OperatorID(2) + expectedErr.want = spectypes.OperatorID(1) + require.ErrorIs(t, err, expectedErr) + }) + + // Receive error when receiving a duplicated partial signature message + t.Run("partial duplicated message", func(t 
*testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages = append(msg.Message.Messages, msg.Message.Messages[0]) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrDuplicatedPartialSignatureMessage) + }) + + // Receive error when "partialSignatureMessages" does not contain any "partialSignatureMessage" + t.Run("no partial signature messages", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages = []*spectypes.PartialSignatureMessage{} + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrNoPartialMessages) + }) + + // Receive an error when the partial signature is shorter than the expected length + t.Run("partial wrong signature size", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Signature = []byte{1} + + encoded, err := msg.Encode() + require.ErrorContains(t, err, "bytes array does not have the correct length") + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Get error when receiving a partial signature message with an invalid signature + t.Run("partial wrong signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Signature = bytes.Repeat([]byte{1}, 96) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrInvalidSignature.Error()) + }) + + // Run partial message type validation tests + t.Run("partial message type validation", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(162304) + + // Check happy flow of a duty for each role + t.Run("valid", func(t *testing.T) { + tests := map[spectypes.BeaconRole][]spectypes.PartialSigMsgType{ + spectypes.BNRoleAttester: {spectypes.PostConsensusPartialSig}, + spectypes.BNRoleAggregator: {spectypes.PostConsensusPartialSig, spectypes.SelectionProofPartialSig}, + spectypes.BNRoleProposer: {spectypes.PostConsensusPartialSig, spectypes.RandaoPartialSig}, + spectypes.BNRoleSyncCommittee: {spectypes.PostConsensusPartialSig}, + spectypes.BNRoleSyncCommitteeContribution: {spectypes.PostConsensusPartialSig, spectypes.ContributionProofs}, + spectypes.BNRoleValidatorRegistration: {spectypes.ValidatorRegistrationPartialSig}, + } + + for role, msgTypes := range tests { + for _, msgType := range msgTypes { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + innerSig, r, err := spectestingutils.NewTestingKeyManager().SignBeaconObject(spectypes.SSZUint64(spectestingutils.TestingDutyEpoch), phase0.Domain{}, ks.Shares[1].GetPublicKey().Serialize(), phase0.DomainType{}) + require.NoError(t, err) + + innerMsg := spectypes.PartialSignatureMessages{ + Type: msgType, + Messages: []*spectypes.PartialSignatureMessage{ + { + PartialSignature: innerSig, + SigningRoot: r, + Signer: 1, + }, + }, + } + + sig, err := spectestingutils.NewTestingKeyManager().SignRoot(innerMsg, spectypes.PartialSignatureType, ks.Shares[1].GetPublicKey().Serialize()) + require.NoError(t, err) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: innerMsg, + Signature: sig, + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + } + } + }) + + // Get error when receiving a message with an incorrect message type + t.Run("invalid message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: spectypes.PartialSignatureMessages{ + Type: math.MaxUint64, + }, + Signature: make([]byte, 96), + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrUnknownPartialMessageType.Error()) + }) + + // Get error when sending an unexpected message type for the required duty (sending randao for attester duty) + t.Run("mismatch", func(t *testing.T) { + tests := map[spectypes.BeaconRole][]spectypes.PartialSigMsgType{ + spectypes.BNRoleAttester: 
{spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleAggregator: {spectypes.RandaoPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleProposer: {spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleSyncCommittee: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleSyncCommitteeContribution: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleValidatorRegistration: {spectypes.PostConsensusPartialSig, spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs}, + } + + for role, msgTypes := range tests { + for _, msgType := range msgTypes { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: spectypes.PartialSignatureMessages{ + Type: msgType, + }, + Signature: make([]byte, 96), + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrPartialSignatureTypeRoleMismatch.Error()) + } + } + }) + }) + + // Get error when receiving QBFT message with an invalid type + t.Run("invalid QBFT message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + msg := &specqbft.Message{ + MsgType: math.MaxUint64, + Height: height, + Round: specqbft.FirstRound, + Identifier: spectestingutils.TestingIdentifier, + Root: spectestingutils.TestingQBFTRootData, + } + signedMsg := spectestingutils.SignQBFTMsg(ks.Shares[1], 1, msg) + + encodedValidSignedMessage, err := signedMsg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrUnknownQBFTMessageType + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving an incorrect signature size (too small) + t.Run("wrong signature size", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Signature = []byte{0x1} + + _, err := validSignedMessage.Encode() + require.Error(t, err) + }) + + // Initialize signature tests + t.Run("zero signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := 
netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + // Get error when receiving a consensus message with a zero signature + t.Run("consensus message", func(t *testing.T) { + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + zeroSignature := [signatureSize]byte{} + validSignedMessage.Signature = zeroSignature[:] + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSignature) + }) + + // Get error when receiving a partial signature message with a zero signature + t.Run("partial signature message", func(t *testing.T) { + partialSigMessage := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, height) + zeroSignature := [signatureSize]byte{} + partialSigMessage.Signature = zeroSignature[:] + + encoded, err := partialSigMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorIs(t, err, ErrZeroSignature) + }) + }) + + // Get error when receiving a message with an empty list of signers + t.Run("no signers", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Signers = []spectypes.OperatorID{} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrNoSigners) + }) + + // Initialize zero signer tests + t.Run("zero signer", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + inactiveSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + zeroSignerKS := spectestingutils.Testing7SharesSet() + zeroSignerShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(zeroSignerKS), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + }, + Liquidated: false, + }, + } + zeroSignerShare.Committee[0].OperatorID = 0 + zeroSignerShare.ValidatorPubKey = inactiveSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, zeroSignerShare)) + + // Get error when receiving a consensus message with a zero signer + t.Run("consensus message", func(t 
*testing.T) { + validSignedMessage := spectestingutils.TestingProposalMessage(zeroSignerKS.Shares[1], 1) + validSignedMessage.Signers = []spectypes.OperatorID{0} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, zeroSignerShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + // Get error when receiving a partial message with a zero signer + t.Run("partial signature message", func(t *testing.T) { + partialSignatureMessage := spectestingutils.PostConsensusAttestationMsg(zeroSignerKS.Shares[1], 1, specqbft.Height(slot)) + partialSignatureMessage.Signer = 0 + + encoded, err := partialSignatureMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, zeroSignerShare.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + require.NoError(t, ns.Shares().Delete(nil, zeroSignerShare.ValidatorPubKey)) + }) + + // Get error when receiving a message with duplicated signers + t.Run("non unique signer", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{1, 2, 2} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrDuplicatedSigner) + }) + + // Get error when receiving a message with non-sorted signers + t.Run("signers not sorted", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{3, 2, 1} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrSignersNotSorted) + }) + + // Get 
an error when receiving a message whose number of signers is outside the allowed quorum range + t.Run("wrong signers length", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{1, 2} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrWrongSignersLength + expectedErr.got = 2 + expectedErr.want = "between 3 and 4" + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving a non decided message with multiple signers + t.Run("non decided with multiple signers", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingMultiSignerProposalMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrNonDecidedWithMultipleSigners + expectedErr.got = 3 + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving a proposal message with an invalid signature (random bytes) + t.Run("wrong signed signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + validSignedMessage.Signature = bytes.Repeat([]byte{1}, 96) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrInvalidSignature.Error()) + }) + + // Send late message for all roles and receive late message error + t.Run("late message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := 
validSignedMessage.Encode() + require.NoError(t, err) + + tests := map[spectypes.BeaconRole]time.Time{ + spectypes.BNRoleAttester: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAttester)), + spectypes.BNRoleAggregator: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAggregator)), + spectypes.BNRoleProposer: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), + spectypes.BNRoleSyncCommittee: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommittee)), + spectypes.BNRoleSyncCommitteeContribution: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommitteeContribution)), + } + + for role, receivedAt := range tests { + role, receivedAt := role, receivedAt + t.Run(role.String(), func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrLateMessage.Error()) + }) + } + }) + + // Send early message for all roles before the duty start and receive early message error + t.Run("early message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot - 1) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrEarlyMessage) + }) + + // Sending a message from a non-leader acting as a leader should result in an error + t.Run("not a leader", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[2], 2, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrSignerNotLeader + expectedErr.got = spectypes.OperatorID(2) + expectedErr.want = spectypes.OperatorID(1) + require.ErrorIs(t, err, expectedErr) + }) + + // Sending wrong-sized data (1 byte) for a prepare justification should result in an error + t.Run("malformed prepare justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := 
netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Message.PrepareJustification = [][]byte{{1}} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedPrepareJustifications.Error()) + }) + + // Send prepare justification message without a proposal message should receive an error + t.Run("non-proposal with prepare justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.TestingProposalMessageWithParams( + ks.Shares[1], spectypes.OperatorID(1), specqbft.FirstRound, specqbft.FirstHeight, spectestingutils.TestingQBFTRootData, + nil, + spectestingutils.MarshalJustifications([]*specqbft.SignedMessage{ + spectestingutils.TestingRoundChangeMessage(ks.Shares[1], spectypes.OperatorID(1)), + })) + msg.Message.MsgType = specqbft.PrepareMsgType + + encodedValidSignedMessage, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrUnexpectedPrepareJustifications + expectedErr.got = specqbft.PrepareMsgType + require.ErrorIs(t, err, expectedErr) + }) + + // Send round change justification message without a proposal message should receive an error + t.Run("non-proposal with round change justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.TestingProposalMessageWithParams( + ks.Shares[1], spectypes.OperatorID(1), specqbft.FirstRound, specqbft.FirstHeight, spectestingutils.TestingQBFTRootData, + spectestingutils.MarshalJustifications([]*specqbft.SignedMessage{ + spectestingutils.TestingPrepareMessage(ks.Shares[1], spectypes.OperatorID(1)), + }), + nil, + ) + msg.Message.MsgType = specqbft.PrepareMsgType + + encodedValidSignedMessage, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrUnexpectedRoundChangeJustifications + expectedErr.got = specqbft.PrepareMsgType + require.ErrorIs(t, err, expectedErr) + }) + + // Send round change justification message with a malformed message (1 byte) should receive an error + 
t.Run("malformed round change justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Message.RoundChangeJustification = [][]byte{{1}} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedRoundChangeJustifications.Error()) + }) + + // Send message root hash that doesnt match the expected root hash should receive an error + t.Run("wrong root hash", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.FullData = []byte{1} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrInvalidHash + require.ErrorIs(t, err, expectedErr) + }) + + // Receive proposal from same operator twice with different messages (same round) should receive an error + t.Run("double proposal with different data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) + signed2.FullData = []byte{1} + signed2.Message.Root, err = specqbft.HashDataRoot(signed2.FullData) + require.NoError(t, err) + + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrDuplicatedProposalWithDifferentData + require.ErrorIs(t, err, expectedErr) + }) + + // 
Receiving a prepare message from the same operator twice in the same round should result in an error + t.Run("double prepare", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) + require.NoError(t, err) + + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "prepare, having pre-consensus: 0, proposal: 0, prepare: 1, commit: 0, decided: 0, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receiving a commit message from the same operator twice in the same round should result in an error + t.Run("double commit", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 1, decided: 0, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receiving a round change message from the same operator twice in the same round should result in an error + t.Run("double round change", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: 
spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 0, round change: 1, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receiving too many decided messages should result in an error + t.Run("too many decided", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + + signed := spectestingutils.TestingCommitMultiSignerMessageWithRound( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, 1) + encodedSigned, err := signed.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedSigned, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + for i := 0; i < maxDecidedCount(len(share.Committee)); i++ { + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + } + + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "decided, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 8, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receiving a message with a round that is too high for the role should result in an error + t.Run("round too high", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + tests := map[spectypes.BeaconRole]specqbft.Round{ + spectypes.BNRoleAttester: 13, + spectypes.BNRoleAggregator: 13, + spectypes.BNRoleProposer: 7, + spectypes.BNRoleSyncCommittee: 7, + spectypes.BNRoleSyncCommitteeContribution: 7, + } + + for role, round := range tests { + role, round := role, round + t.Run(role.String(), func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + signedMessage := spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, round) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(0).Add(validator.waitAfterSlotStart(role)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorContains(t, err, ErrRoundTooHigh.Error()) + }) + } + }) + + // Receiving a message with a round that has already been advanced past should result in an error + t.Run("round 
already advanced", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signedMessage := spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 2) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.NoError(t, err) + + signedMessage = spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 1) + encodedMessage, err = signedMessage.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorContains(t, err, ErrRoundAlreadyAdvanced.Error()) + }) + + // Initialize tests for testing when sending a message with a slot before the current one + t.Run("slot already advanced", func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + // Send a consensus message with a slot before the current one should cause an error + t.Run("consensus message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + signedMessage := spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height+1) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester))) + require.NoError(t, err) + + signedMessage = spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height) + encodedMessage, err = signedMessage.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + + // Send a partial signature message with a slot before the current one should cause an error + t.Run("partial signature message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height+1) + message.Message.Slot = phase0.Slot(height) + 1 + sig, err := spectestingutils.NewTestingKeyManager().SignRoot(message.Message, spectypes.PartialSignatureType, ks.Shares[2].GetPublicKey().Serialize()) + require.NoError(t, err) + message.Signature = sig + + encodedMessage, err := message.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester))) + 
require.NoError(t, err) + + message = spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height) + message.Message.Slot = phase0.Slot(height) + sig, err = spectestingutils.NewTestingKeyManager().SignRoot(message.Message, spectypes.PartialSignatureType, ks.Shares[2].GetPublicKey().Serialize()) + require.NoError(t, err) + message.Signature = sig + + encodedMessage, err = message.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + }) + + // Event messages received over the network should be rejected with an error + t.Run("event message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + eventMsg := &ssvtypes.EventMsg{} + encoded, err := eventMsg.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: ssvmessage.SSVEventMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorIs(t, err, ErrEventMessage) + }) +} diff --git a/monitoring/grafana/dashboard_msg_validation.json b/monitoring/grafana/dashboard_msg_validation.json new file mode 100644 index 0000000000..8ea0bd8f08 --- /dev/null +++ b/monitoring/grafana/dashboard_msg_validation.json @@ -0,0 +1,2175 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 144, + "iteration": 1695134055974, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 12, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Total", + "refId": "A" + },
+ { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Ignored", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Rejected", + "refId": "C" + } + ], + "title": "Message RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "#F2495C", + "mode": "palette-classic", + "seriesBy": "last" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 0, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 3, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 3, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval]))\n/\nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Rejected", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval]))\n/\nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval]))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Ignored", + "refId": "B" + } + ], + "title": "Ignore/Reject Rate", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": 
"off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 20, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval])) by (role) / sum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (role)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Ignored by Role", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 22, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval])) by (role) / sum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (role)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Rejected by Role", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + 
"mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 23, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval])) by (round) \n/ \nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (round)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Ignored by Round", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 24, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval])) by (round) \n/ \nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (round)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Rejected by Round", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": 
"off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "duplicated proposal with different data", + "late message", + "message round is too far from estimated", + "no duty for this epoch", + "round is too high for this role", + "signer has already advanced to a later slot", + "too many messages of same type per round", + "unknown validator", + "validator is not attesting" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 4, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right", + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation{instance=~\"$instance.*\", reason!=\"\"}[$__interval])) by (reason)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{reason}}", + "refId": "A" + } + ], + "title": "Validation Failure Reason", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 5, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_ssv_type{instance=~\"$instance.*\", type!=\"\"}[$__interval])) by (type)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "Messages by SSV type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": 
"A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 6, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type!=\"\"}[$__interval])) by (type)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "Messages by QBFT type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 48 + }, + "id": 7, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type=\"commit\", signers=\"1\"}[$__interval])) by (signers)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "Commit", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type=\"commit\", signers!=\"1\"}[$__interval])) by (signers))", + "hide": false, + "interval": "", + "legendFormat": "Decided", + "refId": "B" + } + ], + "title": "Commit messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + 
"gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 56 + }, + "id": 19, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_in_committee{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "in committee", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "rate(ssv_message_non_committee{instance=~\"$instance.*\", decided=\"decided\"}[$__interval])", + "hide": false, + "interval": "", + "legendFormat": "non-committee decided", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_non_committee{instance=~\"$instance.*\", decided=\"non-decided\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "non-committee non-decided", + "refId": "C" + } + ], + "title": "Committee belonging RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 64 + }, + "id": 9, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": 
"histogram_quantile(0.95, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message size (bytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "Over panel interval", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 72 + }, + "id": 13, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_sum{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Bytes", + "refId": "A" + } + ], + "title": "Total bytes received RPS (incoming messages)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 80 + }, + "id": 14, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation_duration_seconds_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_message_validation_duration_seconds_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, 
rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message validation duration (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 88 + }, + "id": 15, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_signature_validation_duration_seconds_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_signature_validation_duration_seconds_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Signature validation duration (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 96 + }, + "id": 17, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_incoming{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Incoming, RPS", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_outgoing{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Outgoing, RPS", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_drops{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Dropped, RPS", + "refId": "C" + } + ], + "title": "Queue message RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 104 + }, + "id": 18, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "ssv_message_queue_size{instance=~\"$instance.*\"}", + "hide": false, + "interval": "", + "legendFormat": "Size", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "ssv_message_queue_capacity{instance=~\"$instance.*\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Capacity", + "refId": "G" + } + ], + "title": "Queue size/capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 112 + }, + "id": 16, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_time_in_queue_seconds_sum{instance=~\"$instance.*\"}[$__interval])) by (instance)\n/\nsum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval])) by (instance)\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": 
"histogram_quantile(0.95, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message time in queue (seconds)", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 34, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "ssv-node-v2-4", + "value": "ssv-node-v2-4" + }, + "hide": 1, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [ + { + "selected": false, + "text": "ssv-node-v2-1", + "value": "ssv-node-v2-1" + }, + { + "selected": false, + "text": "ssv-node-v2-2", + "value": "ssv-node-v2-2" + }, + { + "selected": false, + "text": "ssv-node-v2-3", + "value": "ssv-node-v2-3" + }, + { + "selected": true, + "text": "ssv-node-v2-4", + "value": "ssv-node-v2-4" + }, + { + "selected": false, + "text": "ssv-node-v2-5", + "value": "ssv-node-v2-5" + }, + { + "selected": false, + "text": "ssv-node-v2-6", + "value": "ssv-node-v2-6" + }, + { + "selected": false, + "text": "ssv-node-v2-7", + "value": "ssv-node-v2-7" + }, + { + "selected": false, + "text": "ssv-node-v2-8", + "value": "ssv-node-v2-8" + } + ], + "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Message Validation", + "uid": "DppaYPgSk", + "version": 42, + "weekStart": "" +} \ No newline at end of file diff --git a/monitoring/metricsreporter/metrics_reporter.go b/monitoring/metricsreporter/metrics_reporter.go index 859d46e518..01227e94c6 100644 --- a/monitoring/metricsreporter/metrics_reporter.go +++ b/monitoring/metricsreporter/metrics_reporter.go @@ -4,12 +4,16 @@ import ( "crypto/sha256" "fmt" "strconv" + "time" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/zap" + + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ) // TODO: implement all methods @@ -33,6 +37,10 @@ const ( validatorPending = float64(8) validatorRemoved = float64(9) validatorUnknown = float64(10) + + messageAccepted = "accepted" + messageIgnored = "ignored" + messageRejected = "rejected" ) var ( @@ -65,6 +73,70 @@ var ( Name: "ssv:exporter:operator_index", Help: "operator footprint", }, []string{"pubKey", "index"}) + messageValidationResult = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation", + Help: "Message validation result", + }, []string{"status", "reason", "role", "round"}) + messageValidationSSVType = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation_ssv_type", + Help: "SSV message type", + }, []string{"type"}) + messageValidationConsensusType = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation_consensus_type", + Help: "Consensus message 
type", + }, []string{"type", "signers"}) + messageValidationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_validation_duration_seconds", + Help: "Message validation duration (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.020, 0.050}, + }, []string{}) + signatureValidationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_signature_validation_duration_seconds", + Help: "Signature validation duration (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.020, 0.050}, + }, []string{}) + messageSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_size", + Help: "Message size", + Buckets: []float64{100, 500, 1_000, 5_000, 10_000, 50_000, 100_000, 500_000, 1_000_000, 5_000_000}, + }, []string{}) + activeMsgValidation = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv:p2p:pubsub:msg:val:active", + Help: "Count active message validation", + }, []string{"topic"}) + incomingQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_incoming", + Help: "The amount of message incoming to the validator's msg queue", + }, []string{"msg_id"}) + outgoingQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_outgoing", + Help: "The amount of message outgoing from the validator's msg queue", + }, []string{"msg_id"}) + droppedQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_drops", + Help: "The amount of message dropped from the validator's msg queue", + }, []string{"msg_id"}) + messageQueueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv_message_queue_size", + Help: "Size of message queue", + }, []string{}) + messageQueueCapacity = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv_message_queue_capacity", + Help: "Capacity of message queue", + }, []string{}) + messageTimeInQueue = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_time_in_queue_seconds", + Help: "Time message spent in queue (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.050, 0.100, 0.500, 1, 5, 10, 60}, + }, []string{"msg_id"}) + inCommitteeMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_in_committee", + Help: "The amount of messages in committee", + }, []string{"ssv_msg_type", "decided"}) + nonCommitteeMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_non_committee", + Help: "The amount of messages not in committee", + }, []string{"ssv_msg_type", "decided"}) ) type MetricsReporter struct { @@ -84,9 +156,26 @@ func New(opts ...Option) *MetricsReporter { allMetrics := []prometheus.Collector{ ssvNodeStatus, executionClientStatus, + executionClientLastFetchedBlock, validatorStatus, eventProcessed, eventProcessingFailed, + operatorIndex, + messageValidationResult, + messageValidationSSVType, + messageValidationConsensusType, + messageValidationDuration, + signatureValidationDuration, + messageSize, + activeMsgValidation, + incomingQueueMessages, + outgoingQueueMessages, + droppedQueueMessages, + messageQueueSize, + messageQueueCapacity, + messageTimeInQueue, + inCommitteeMessages, + nonCommitteeMessages, } for i, c := range allMetrics { @@ -102,77 +191,183 @@ func New(opts ...Option) *MetricsReporter { return &MetricsReporter{} } -func (m MetricsReporter) SSVNodeHealthy() { +func (m *MetricsReporter) SSVNodeHealthy() { ssvNodeStatus.Set(ssvNodeHealthy) } -func (m MetricsReporter) SSVNodeNotHealthy() { +func (m *MetricsReporter) 
SSVNodeNotHealthy() { ssvNodeStatus.Set(ssvNodeNotHealthy) } -func (m MetricsReporter) ExecutionClientReady() { +func (m *MetricsReporter) ExecutionClientReady() { executionClientStatus.Set(executionClientOK) } -func (m MetricsReporter) ExecutionClientSyncing() { +func (m *MetricsReporter) ExecutionClientSyncing() { executionClientStatus.Set(executionClientSyncing) } -func (m MetricsReporter) ExecutionClientFailure() { +func (m *MetricsReporter) ExecutionClientFailure() { executionClientStatus.Set(executionClientFailure) } -func (m MetricsReporter) ExecutionClientLastFetchedBlock(block uint64) { +func (m *MetricsReporter) ExecutionClientLastFetchedBlock(block uint64) { executionClientLastFetchedBlock.Set(float64(block)) } -func (m MetricsReporter) OperatorPublicKey(operatorID spectypes.OperatorID, publicKey []byte) { +func (m *MetricsReporter) OperatorPublicKey(operatorID spectypes.OperatorID, publicKey []byte) { pkHash := fmt.Sprintf("%x", sha256.Sum256(publicKey)) operatorIndex.WithLabelValues(pkHash, strconv.FormatUint(operatorID, 10)).Set(float64(operatorID)) } -func (m MetricsReporter) ValidatorInactive(publicKey []byte) { +func (m *MetricsReporter) ValidatorInactive(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorInactive) } -func (m MetricsReporter) ValidatorNoIndex(publicKey []byte) { +func (m *MetricsReporter) ValidatorNoIndex(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNoIndex) } -func (m MetricsReporter) ValidatorError(publicKey []byte) { +func (m *MetricsReporter) ValidatorError(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorError) } -func (m MetricsReporter) ValidatorReady(publicKey []byte) { +func (m *MetricsReporter) ValidatorReady(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorReady) } -func (m MetricsReporter) ValidatorNotActivated(publicKey []byte) { +func (m *MetricsReporter) ValidatorNotActivated(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNotActivated) } -func (m MetricsReporter) ValidatorExiting(publicKey []byte) { +func (m *MetricsReporter) ValidatorExiting(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorExiting) } -func (m MetricsReporter) ValidatorSlashed(publicKey []byte) { +func (m *MetricsReporter) ValidatorSlashed(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorSlashed) } -func (m MetricsReporter) ValidatorNotFound(publicKey []byte) { +func (m *MetricsReporter) ValidatorNotFound(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNotFound) } -func (m MetricsReporter) ValidatorPending(publicKey []byte) { +func (m *MetricsReporter) ValidatorPending(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorPending) } -func (m MetricsReporter) ValidatorRemoved(publicKey []byte) { +func (m *MetricsReporter) ValidatorRemoved(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorRemoved) } -func (m MetricsReporter) ValidatorUnknown(publicKey []byte) { +func (m *MetricsReporter) ValidatorUnknown(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorUnknown) } -func (m MetricsReporter) EventProcessed(eventName string) { +func (m *MetricsReporter) 
EventProcessed(eventName string) { eventProcessed.WithLabelValues(eventName).Inc() } -func (m MetricsReporter) EventProcessingFailed(eventName string) { +func (m *MetricsReporter) EventProcessingFailed(eventName string) { eventProcessingFailed.WithLabelValues(eventName).Inc() } // TODO implement -func (m MetricsReporter) LastBlockProcessed(uint64) {} -func (m MetricsReporter) LogsProcessingError(error) {} +func (m *MetricsReporter) LastBlockProcessed(uint64) {} +func (m *MetricsReporter) LogsProcessingError(error) {} + +func (m *MetricsReporter) MessageAccepted( + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageAccepted, + "", + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) MessageIgnored( + reason string, + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageIgnored, + reason, + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) MessageRejected( + reason string, + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageRejected, + reason, + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) SSVMessageType(msgType spectypes.MsgType) { + messageValidationSSVType.WithLabelValues(ssvmessage.MsgTypeToString(msgType)).Inc() +} + +func (m *MetricsReporter) ConsensusMsgType(msgType specqbft.MessageType, signers int) { + messageValidationConsensusType.WithLabelValues(ssvmessage.QBFTMsgTypeToString(msgType), strconv.Itoa(signers)).Inc() +} + +func (m *MetricsReporter) MessageValidationDuration(duration time.Duration, labels ...string) { + messageValidationDuration.WithLabelValues(labels...).Observe(duration.Seconds()) +} + +func (m *MetricsReporter) SignatureValidationDuration(duration time.Duration, labels ...string) { + signatureValidationDuration.WithLabelValues(labels...).Observe(duration.Seconds()) +} + +func (m *MetricsReporter) MessageSize(size int) { + messageSize.WithLabelValues().Observe(float64(size)) +} + +func (m *MetricsReporter) ActiveMsgValidation(topic string) { + activeMsgValidation.WithLabelValues(topic).Inc() +} + +func (m *MetricsReporter) ActiveMsgValidationDone(topic string) { + activeMsgValidation.WithLabelValues(topic).Dec() +} + +func (m *MetricsReporter) IncomingQueueMessage(messageID spectypes.MessageID) { + incomingQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) OutgoingQueueMessage(messageID spectypes.MessageID) { + outgoingQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) DroppedQueueMessage(messageID spectypes.MessageID) { + droppedQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) MessageQueueSize(size int) { + messageQueueSize.WithLabelValues().Set(float64(size)) +} + +func (m *MetricsReporter) MessageQueueCapacity(size int) { + messageQueueCapacity.WithLabelValues().Set(float64(size)) +} + +func (m *MetricsReporter) MessageTimeInQueue(messageID spectypes.MessageID, d time.Duration) { + messageTimeInQueue.WithLabelValues(messageID.String()).Observe(d.Seconds()) +} + +func (m *MetricsReporter) InCommitteeMessage(msgType spectypes.MsgType, decided bool) { + str := "non-decided" + if decided { + str = "decided" + } + inCommitteeMessages.WithLabelValues(ssvmessage.MsgTypeToString(msgType), str).Inc() +} + +func (m *MetricsReporter) 
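
Editor's note: a hedged sketch of a call site for the accept/ignore/reject counters defined below. The wrapper type, its `metrics` field, and the `rejected` flag are assumptions for illustration; the real call sites live in the message validation package:

```go
package validation

import (
	specqbft "github.com/bloxapp/ssv-spec/qbft"
	spectypes "github.com/bloxapp/ssv-spec/types"

	"github.com/bloxapp/ssv/monitoring/metricsreporter"
)

// exampleValidator is hypothetical; it exists only to show how the new
// MessageAccepted/MessageIgnored/MessageRejected methods are meant to be fed.
type exampleValidator struct {
	metrics *metricsreporter.MetricsReporter
}

func (v *exampleValidator) report(role spectypes.BeaconRole, round specqbft.Round, err error, rejected bool) {
	switch {
	case err == nil:
		v.metrics.MessageAccepted(role, round)
	case rejected:
		// Rejection is the harsher outcome, so the reason label matters most here.
		v.metrics.MessageRejected(err.Error(), role, round)
	default:
		v.metrics.MessageIgnored(err.Error(), role, round)
	}
}
```
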
NonCommitteeMessage(msgType spectypes.MsgType, decided bool) { + str := "non-decided" + if decided { + str = "decided" + } + nonCommitteeMessages.WithLabelValues(ssvmessage.MsgTypeToString(msgType), str).Inc() +} diff --git a/network/network.go b/network/network.go index 67af7476fb..f40678892c 100644 --- a/network/network.go +++ b/network/network.go @@ -1,19 +1,19 @@ package network import ( + "context" "io" "go.uber.org/zap" - spectypes "github.com/bloxapp/ssv-spec/types" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) // MessageRouter is accepting network messages and route them to the corresponding (internal) components type MessageRouter interface { // Route routes the given message, this function MUST NOT block - Route(logger *zap.Logger, message spectypes.SSVMessage) + Route(ctx context.Context, message *queue.DecodedSSVMessage) } // MessageRouting allows to register a MessageRouter diff --git a/network/p2p/config.go b/network/p2p/config.go index 77f1e599b5..935eaa4c2a 100644 --- a/network/p2p/config.go +++ b/network/p2p/config.go @@ -14,6 +14,8 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/monitoring/metricsreporter" "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/networkconfig" @@ -62,6 +64,10 @@ type Config struct { NodeStorage storage.Storage // Network defines a network configuration. Network networkconfig.NetworkConfig + // MessageValidator validates incoming messages. + MessageValidator validation.MessageValidator + // Metrics report metrics. + Metrics *metricsreporter.MetricsReporter PubsubMsgCacheTTL time.Duration `yaml:"PubsubMsgCacheTTL" env:"PUBSUB_MSG_CACHE_TTL" env-description:"How long a message ID will be remembered as seen"` PubsubOutQueueSize int `yaml:"PubsubOutQueueSize" env:"PUBSUB_OUT_Q_SIZE" env-description:"The size that we assign to the outbound pubsub message queue"` diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 4f27098061..e665e41143 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -7,18 +7,17 @@ import ( "time" "github.com/cornelk/hashmap" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/network/commons" - connmgrcore "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" libp2pdiscbackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" "github.com/bloxapp/ssv/network/peers" "github.com/bloxapp/ssv/network/peers/connections" @@ -56,14 +55,15 @@ type p2pNetwork struct { interfaceLogger *zap.Logger // struct logger to log in interface methods that do not accept a logger cfg *Config - host host.Host - streamCtrl streams.StreamController - idx peers.Index - disc discovery.Service - topicsCtrl topics.Controller - msgRouter network.MessageRouter - msgResolver topics.MsgPeersResolver - connHandler connections.ConnHandler + host host.Host + streamCtrl streams.StreamController + idx peers.Index + disc discovery.Service + topicsCtrl topics.Controller + msgRouter network.MessageRouter + msgResolver topics.MsgPeersResolver + msgValidator validation.MessageValidator + connHandler 
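
Editor's note: `MessageRouter.Route` now receives a context and an already-decoded message. A minimal non-blocking implementation sketch; the channel-based design here is an assumption for illustration, not the repo's actual router:

```go
package example

import (
	"context"

	"github.com/bloxapp/ssv/protocol/v2/ssv/queue"
)

// channelRouter is a hypothetical MessageRouter: it hands decoded messages
// to a worker channel and never blocks, as the interface contract requires.
type channelRouter struct {
	msgs chan *queue.DecodedSSVMessage
}

func newChannelRouter(buffer int) *channelRouter {
	return &channelRouter{msgs: make(chan *queue.DecodedSSVMessage, buffer)}
}

func (r *channelRouter) Route(ctx context.Context, msg *queue.DecodedSSVMessage) {
	select {
	case r.msgs <- msg:
	case <-ctx.Done():
	default:
		// Buffer full: drop rather than block the caller's pubsub loop.
	}
}
```
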
connections.ConnHandler state int32 @@ -90,6 +90,7 @@ func New(logger *zap.Logger, cfg *Config) network.P2PNetwork { interfaceLogger: logger, cfg: cfg, msgRouter: cfg.Router, + msgValidator: cfg.MessageValidator, state: stateClosed, activeValidators: hashmap.New[string, validatorStatus](), nodeStorage: cfg.NodeStorage, @@ -172,7 +173,7 @@ func (n *p2pNetwork) Start(logger *zap.Logger) error { } // Create & start ConcurrentSyncer. - syncer := syncing.NewConcurrent(n.ctx, syncing.New(n), 16, syncing.DefaultTimeouts, nil) + syncer := syncing.NewConcurrent(n.ctx, syncing.New(n, n.msgValidator), 16, syncing.DefaultTimeouts, nil) go syncer.Run(logger) n.syncer = syncer diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go index 708deb79d3..d88be4af21 100644 --- a/network/p2p/p2p_pubsub.go +++ b/network/p2p/p2p_pubsub.go @@ -1,6 +1,7 @@ package p2pv1 import ( + "context" "encoding/hex" "fmt" @@ -11,12 +12,12 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/records" - - "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) type validatorStatus int @@ -133,8 +134,8 @@ func (n *p2pNetwork) subscribe(logger *zap.Logger, pk spectypes.ValidatorPK) err } // handleIncomingMessages reads messages from the given channel and calls the router, note that this function blocks. -func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(topic string, msg *pubsub.Message) error { - return func(topic string, msg *pubsub.Message) error { +func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(ctx context.Context, topic string, msg *pubsub.Message) error { + return func(ctx context.Context, topic string, msg *pubsub.Message) error { if n.msgRouter == nil { logger.Debug("msg router is not configured") return nil @@ -143,26 +144,28 @@ func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(topic string, return nil } - var ssvMsg *spectypes.SSVMessage + var decodedMsg *queue.DecodedSSVMessage if msg.ValidatorData != nil { - m, ok := msg.ValidatorData.(spectypes.SSVMessage) + m, ok := msg.ValidatorData.(*queue.DecodedSSVMessage) if ok { - ssvMsg = &m + decodedMsg = m } } - if ssvMsg == nil { + if decodedMsg == nil { return errors.New("message was not decoded") } - p2pID := ssvMsg.GetID().String() + p2pID := decodedMsg.GetID().String() // logger.With( // zap.String("pubKey", hex.EncodeToString(ssvMsg.MsgID.GetPubKey())), // zap.String("role", ssvMsg.MsgID.GetRoleType().String()), // ).Debug("handlePubsubMessages") - metricsRouterIncoming.WithLabelValues(p2pID, message.MsgTypeToString(ssvMsg.MsgType)).Inc() - n.msgRouter.Route(logger, *ssvMsg) + metricsRouterIncoming.WithLabelValues(p2pID, message.MsgTypeToString(decodedMsg.MsgType)).Inc() + + n.msgRouter.Route(ctx, decodedMsg) + return nil } } diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index 8ffe70656b..10a0e7cbc3 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -276,14 +276,12 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error { } func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { - cfg := &topics.PububConfig{ - Host: n.host, - TraceLog: n.cfg.PubSubTrace, - MsgValidatorFactory: func(s string) topics.MsgValidatorFunc { - return topics.NewSSVMsgValidator() - }, - MsgHandler: n.handlePubsubMessages(logger), 
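
Editor's note: `handlePubsubMessages` above relies on the topic validator having already decoded the message and stashed the result in `msg.ValidatorData`. A sketch of that handshake, with `decodedMessage` and `decode` as hypothetical stand-ins for the real `queue.DecodedSSVMessage` machinery:

```go
package example

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/peer"
)

// decodedMessage and decode are stand-ins for the real decoding logic.
type decodedMessage struct{ raw []byte }

func decode(data []byte) (*decodedMessage, error) {
	return &decodedMessage{raw: data}, nil
}

// validatorForTopic returns a pubsub validator that decodes once and attaches
// the result, so the message handler never has to decode a second time.
func validatorForTopic(topic string) func(context.Context, peer.ID, *pubsub.Message) pubsub.ValidationResult {
	return func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
		decoded, err := decode(msg.GetData())
		if err != nil {
			return pubsub.ValidationReject
		}
		msg.ValidatorData = decoded // read back by the handler via a type assertion
		return pubsub.ValidationAccept
	}
}
```
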
- ScoreIndex: n.idx, + cfg := &topics.PubSubConfig{ + Host: n.host, + TraceLog: n.cfg.PubSubTrace, + MsgValidator: n.msgValidator, + MsgHandler: n.handlePubsubMessages(logger), + ScoreIndex: n.idx, //Discovery: n.disc, OutboundQueueSize: n.cfg.PubsubOutQueueSize, ValidationQueueSize: n.cfg.PubsubValidationQueueSize, @@ -302,10 +300,12 @@ func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { go cfg.MsgIDHandler.Start() // run GC every 3 minutes to clear old messages async.RunEvery(n.ctx, time.Minute*3, midHandler.GC) - _, tc, err := topics.NewPubsub(n.ctx, logger, cfg) + + _, tc, err := topics.NewPubSub(n.ctx, logger, cfg) if err != nil { return errors.Wrap(err, "could not setup pubsub") } + n.topicsCtrl = tc logger.Debug("topics controller is ready") return nil diff --git a/network/p2p/p2p_sync.go b/network/p2p/p2p_sync.go index 6b810c7d41..a43e199615 100644 --- a/network/p2p/p2p_sync.go +++ b/network/p2p/p2p_sync.go @@ -7,31 +7,34 @@ import ( "math/rand" "time" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/network/commons" - - "github.com/multiformats/go-multistream" - "github.com/bloxapp/ssv-spec/qbft" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" libp2p_protocol "github.com/libp2p/go-libp2p/core/protocol" + "github.com/multiformats/go-multistream" "github.com/pkg/errors" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func (n *p2pNetwork) SyncHighestDecided(mid spectypes.MessageID) error { - return n.syncer.SyncHighestDecided(context.Background(), n.interfaceLogger, mid, func(msg spectypes.SSVMessage) { - n.msgRouter.Route(n.interfaceLogger, msg) + ctx := context.TODO() // TODO: pass context to SyncHighestDecided + + return n.syncer.SyncHighestDecided(ctx, n.interfaceLogger, mid, func(msg *queue.DecodedSSVMessage) { + n.msgRouter.Route(ctx, msg) }) } func (n *p2pNetwork) SyncDecidedByRange(mid spectypes.MessageID, from, to qbft.Height) { + ctx := context.TODO() // TODO: pass context to SyncDecidedByRange + if !n.cfg.FullNode { return } @@ -61,8 +64,8 @@ func (n *p2pNetwork) SyncDecidedByRange(mid spectypes.MessageID, from, to qbft.H return } - err := n.syncer.SyncDecidedByRange(context.Background(), n.interfaceLogger, mid, from, to, func(msg spectypes.SSVMessage) { - n.msgRouter.Route(n.interfaceLogger, msg) + err := n.syncer.SyncDecidedByRange(ctx, n.interfaceLogger, mid, from, to, func(msg *queue.DecodedSSVMessage) { + n.msgRouter.Route(ctx, msg) }) if err != nil { n.interfaceLogger.Error("failed to sync decided by range", zap.Error(err)) diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index 9fc132d0ff..4aace4bc40 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -9,6 +9,8 @@ import ( "time" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" @@ -19,7 +21,6 @@ import ( "github.com/bloxapp/ssv/network" protcolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/protocol/v2/types" ) func TestGetMaxPeers(t *testing.T) { @@ -118,7 +119,7 @@ func TestP2pNetwork_Stream(t *testing.T) { pk, err := 
hex.DecodeString(pkHex) require.NoError(t, err) - mid := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) + mid := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) rounds := []specqbft.Round{ 1, 1, 1, 1, 2, 2, @@ -235,21 +236,23 @@ func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes. }) } -func createNetworkAndSubscribe(t *testing.T, ctx context.Context, n int, pks ...string) (*LocalNet, []*dummyRouter, error) { +func createNetworkAndSubscribe(t *testing.T, ctx context.Context, nodes int, pks ...string) (*LocalNet, []*dummyRouter, error) { logger := logging.TestLogger(t) - ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), n, n/2-1, false) + ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), nodes, nodes/2-1, false) if err != nil { return nil, nil, err } - if len(ln.Nodes) != n { - return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), n) + if len(ln.Nodes) != nodes { + return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), nodes) } logger.Debug("created local network") - routers := make([]*dummyRouter, n) + routers := make([]*dummyRouter, nodes) for i, node := range ln.Nodes { - routers[i] = &dummyRouter{i: i} + routers[i] = &dummyRouter{ + i: i, + } node.UseMessageRouter(routers[i]) } @@ -299,9 +302,8 @@ type dummyRouter struct { i int } -func (r *dummyRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { - c := atomic.AddUint64(&r.count, 1) - logger.Debug("got message", zap.Uint64("count", c)) +func (r *dummyRouter) Route(_ context.Context, _ *queue.DecodedSSVMessage) { + atomic.AddUint64(&r.count, 1) } func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { @@ -309,7 +311,7 @@ func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) signedMsg := &specqbft.SignedMessage{ Message: specqbft.Message{ MsgType: specqbft.CommitMsgType, diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go index bcfa9ad311..70e862aaa7 100644 --- a/network/p2p/test_utils.go +++ b/network/p2p/test_utils.go @@ -12,12 +12,14 @@ import ( "go.uber.org/zap" "golang.org/x/sync/errgroup" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" "github.com/bloxapp/ssv/network/peers" "github.com/bloxapp/ssv/network/peers/connections/mock" "github.com/bloxapp/ssv/network/testing" + "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/utils/format" "github.com/bloxapp/ssv/utils/rsaencryption" ) @@ -136,6 +138,7 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys MockGetPrivateKey: keys.OperatorKey, RegisteredOperatorPublicKeyPEMs: []string{}, } + cfg.MessageValidator = validation.NewMessageValidator(networkconfig.TestNetwork) p := New(logger, cfg) err = p.Setup(logger) diff --git a/network/syncing/syncer.go b/network/syncing/syncer.go index db36a94028..0ac532e1ae 100644 --- a/network/syncing/syncer.go +++ b/network/syncing/syncer.go @@ -10,14 +10,16 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" protocolp2p 
"github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/utils/tasks" ) //go:generate mockgen -package=mocks -destination=./mocks/syncer.go -source=./syncer.go // MessageHandler reacts to a message received from Syncer. -type MessageHandler func(msg spectypes.SSVMessage) +type MessageHandler func(msg *queue.DecodedSSVMessage) // Syncer handles the syncing of decided messages. type Syncer interface { @@ -43,13 +45,15 @@ type Network interface { } type syncer struct { - network Network + network Network + msgValidator validation.MessageValidator } // New returns a standard implementation of Syncer. -func New(network Network) Syncer { +func New(network Network, msgValidator validation.MessageValidator) Syncer { return &syncer{ - network: network, + network: network, + msgValidator: msgValidator, } } @@ -92,11 +96,21 @@ func (s *syncer) SyncHighestDecided( logger.Debug("could not encode signed message", zap.Error(err)) return false } - handler(spectypes.SSVMessage{ + + ssvMessage := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, Data: raw, - }) + } + + decodedMsg, _, err := s.msgValidator.ValidateSSVMessage(ssvMessage) + if err != nil { + logger.Debug("could not validate ssv message", zap.Error(err)) + return false + } + + handler(decodedMsg) + return false }) logger.Debug("synced last decided", zap.Uint64("highest_height", uint64(maxHeight)), zap.Int("messages", len(lastDecided))) @@ -134,11 +148,21 @@ func (s *syncer) SyncDecidedByRange( logger.Debug("could not encode signed message", zap.Error(err)) return nil } - handler(spectypes.SSVMessage{ + + ssvMessage := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, Data: raw, - }) + } + + decodedMsg, _, err := s.msgValidator.ValidateSSVMessage(ssvMessage) + if err != nil { + logger.Debug("could not validate ssv message", zap.Error(err)) + return nil + } + + handler(decodedMsg) + return nil }, ) diff --git a/network/syncing/syncer_test.go b/network/syncing/syncer_test.go index e0f99c3fb4..3bd01c5486 100644 --- a/network/syncing/syncer_test.go +++ b/network/syncing/syncer_test.go @@ -8,6 +8,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/network/syncing" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) type mockSyncer struct{} @@ -27,7 +28,7 @@ type mockMessageHandler struct { func newMockMessageHandler() *mockMessageHandler { m := &mockMessageHandler{} - m.handler = func(msg spectypes.SSVMessage) { + m.handler = func(msg *queue.DecodedSSVMessage) { m.calls++ } return m diff --git a/network/topics/controller.go b/network/topics/controller.go index 3ac1dea7e6..bbc9e3f821 100644 --- a/network/topics/controller.go +++ b/network/topics/controller.go @@ -37,7 +37,11 @@ type Controller interface { } // PubsubMessageHandler handles incoming messages -type PubsubMessageHandler func(string, *pubsub.Message) error +type PubsubMessageHandler func(context.Context, string, *pubsub.Message) error + +type messageValidator interface { + ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult +} // topicsCtrl implements Controller type topicsCtrl struct { @@ -45,25 +49,31 @@ type topicsCtrl struct { logger *zap.Logger // struct logger to implement i.Closer ps *pubsub.PubSub // scoreParamsFactory is a function that helps to set scoring params on topics - scoreParamsFactory func(string) *pubsub.TopicScoreParams - msgValidatorFactory func(string) MsgValidatorFunc - msgHandler 
PubsubMessageHandler - subFilter SubFilter + scoreParamsFactory func(string) *pubsub.TopicScoreParams + msgValidator messageValidator + msgHandler PubsubMessageHandler + subFilter SubFilter container *topicsContainer } // NewTopicsController creates an instance of Controller -func NewTopicsController(ctx context.Context, logger *zap.Logger, msgHandler PubsubMessageHandler, - msgValidatorFactory func(string) MsgValidatorFunc, subFilter SubFilter, pubSub *pubsub.PubSub, - scoreParams func(string) *pubsub.TopicScoreParams) Controller { +func NewTopicsController( + ctx context.Context, + logger *zap.Logger, + msgHandler PubsubMessageHandler, + msgValidator messageValidator, + subFilter SubFilter, + pubSub *pubsub.PubSub, + scoreParams func(string) *pubsub.TopicScoreParams, +) Controller { ctrl := &topicsCtrl{ - ctx: ctx, - logger: logger, - ps: pubSub, - scoreParamsFactory: scoreParams, - msgValidatorFactory: msgValidatorFactory, - msgHandler: msgHandler, + ctx: ctx, + logger: logger, + ps: pubSub, + scoreParamsFactory: scoreParams, + msgValidator: msgValidator, + msgHandler: msgHandler, subFilter: subFilter, } @@ -171,7 +181,7 @@ func (ctrl *topicsCtrl) Broadcast(name string, data []byte, timeout time.Duratio func (ctrl *topicsCtrl) Unsubscribe(logger *zap.Logger, name string, hard bool) error { ctrl.container.Unsubscribe(name) - if ctrl.msgValidatorFactory != nil { + if ctrl.msgValidator != nil { err := ctrl.ps.UnregisterTopicValidator(name) if err != nil { logger.Debug("could not unregister msg validator", zap.String("topic", name), zap.Error(err)) @@ -207,7 +217,9 @@ func (ctrl *topicsCtrl) start(logger *zap.Logger, name string, sub *pubsub.Subsc func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) error { ctx, cancel := context.WithCancel(ctrl.ctx) defer cancel() + topicName := sub.Topic() + logger = logger.With(zap.String("topic", topicName)) logger.Debug("start listening to topic") for ctx.Err() == nil { @@ -235,7 +247,7 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err ).Inc() } - if err := ctrl.msgHandler(topicName, msg); err != nil { + if err := ctrl.msgHandler(ctx, topicName, msg); err != nil { logger.Debug("could not handle msg", zap.Error(err)) } } @@ -244,7 +256,7 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err // setupTopicValidator registers the topic validator func (ctrl *topicsCtrl) setupTopicValidator(name string) error { - if ctrl.msgValidatorFactory != nil { + if ctrl.msgValidator != nil { // first try to unregister in case there is already a msg validator for that topic (e.g. fork scenario) _ = ctrl.ps.UnregisterTopicValidator(name) @@ -252,7 +264,7 @@ func (ctrl *topicsCtrl) setupTopicValidator(name string) error { // Optional: set a timeout for message validation // opts = append(opts, pubsub.WithValidatorTimeout(time.Second)) - err := ctrl.ps.RegisterTopicValidator(name, ctrl.msgValidatorFactory(name), opts...) + err := ctrl.ps.RegisterTopicValidator(name, ctrl.msgValidator.ValidatorForTopic(name), opts...) 
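The per-topic validator factory is gone: the controller now holds a single messageValidator and asks it for a validation func per topic via ValidatorForTopic. A minimal conforming implementation might look like the sketch below (the acceptAllValidator name is hypothetical; the production implementation is the MessageValidator from message/validation):

package topics

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/peer"
)

// acceptAllValidator is a hypothetical messageValidator for illustration:
// it rejects empty payloads and accepts everything else, on every topic.
type acceptAllValidator struct{}

func (acceptAllValidator) ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult {
	return func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult {
		if len(pmsg.GetData()) == 0 {
			return pubsub.ValidationReject // empty payloads are never valid
		}
		return pubsub.ValidationAccept
	}
}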
if err != nil { return errors.Wrap(err, "could not register topic validator") } diff --git a/network/topics/controller_test.go b/network/topics/controller_test.go index bc1e028cc4..4a09584cfb 100644 --- a/network/topics/controller_test.go +++ b/network/topics/controller_test.go @@ -2,61 +2,94 @@ package topics import ( "context" + "encoding/base64" "encoding/hex" - "fmt" + "encoding/json" + "math" "sync" "sync/atomic" "testing" "time" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/network/commons" - - "github.com/bloxapp/ssv/protocol/v2/types" - "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" + "github.com/bloxapp/ssv/networkconfig" ) func TestTopicManager(t *testing.T) { logger := logging.TestLogger(t) - nPeers := 4 - - pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", - "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", - "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", - "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", - "a1169bd8407279d9e56b8cefafa37449afd6751f94d1da6bc8145b96d7ad2940184d506971291cd55ae152f9fc65b146", - "80ff2cfb8fd80ceafbb3c331f271a9f9ce0ed3e360087e314d0a8775e86fa7cd19c999b821372ab6419cde376e032ff6", - "a01909aac48337bab37c0dba395fb7495b600a53c58059a251d00b4160b9da74c62f9c4e9671125c59932e7bb864fd3d", - "a4fc8c859ed5c10d7a1ff9fb111b76df3f2e0a6cbe7d0c58d3c98973c0ff160978bc9754a964b24929fff486ebccb629"} - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - peers := newPeers(ctx, logger, t, nPeers, false, true) - baseTest(t, ctx, logger, peers, pks, 1, 2) + + t.Run("happy flow", func(t *testing.T) { + nPeers := 4 + + pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", + "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", + "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", + "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", + "a1169bd8407279d9e56b8cefafa37449afd6751f94d1da6bc8145b96d7ad2940184d506971291cd55ae152f9fc65b146", + "80ff2cfb8fd80ceafbb3c331f271a9f9ce0ed3e360087e314d0a8775e86fa7cd19c999b821372ab6419cde376e032ff6", + "a01909aac48337bab37c0dba395fb7495b600a53c58059a251d00b4160b9da74c62f9c4e9671125c59932e7bb864fd3d", + "a4fc8c859ed5c10d7a1ff9fb111b76df3f2e0a6cbe7d0c58d3c98973c0ff160978bc9754a964b24929fff486ebccb629"} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validator := validation.NewMessageValidator(networkconfig.TestNetwork) + + peers := newPeers(ctx, logger, t, nPeers, validator, true, nil) + baseTest(t, ctx, logger, peers, pks, 1, 2) + }) + + t.Run("banning peer", func(t *testing.T) { + t.Skip() // TODO: finish the test + + pks := []string{ + 
"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", + "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", + "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", + "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validator := validation.NewMessageValidator(networkconfig.TestNetwork) + + scoreMap := map[peer.ID]*pubsub.PeerScoreSnapshot{} + var scoreMapMu sync.Mutex + + scoreInspector := func(m map[peer.ID]*pubsub.PeerScoreSnapshot) { + b, _ := json.Marshal(m) + t.Logf("peer scores: %v", string(b)) + + scoreMapMu.Lock() + defer scoreMapMu.Unlock() + + scoreMap = m + } + + const nPeers = 4 + peers := newPeers(ctx, logger, t, nPeers, validator, true, scoreInspector) + banningTest(t, ctx, logger, peers, pks, scoreMap, &scoreMapMu) + }) } func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, pks []string, minMsgCount, maxMsgCount int) { nValidators := len(pks) // nPeers := len(peers) - validatorTopic := func(pkhex string) string { - pk, err := hex.DecodeString(pkhex) - if err != nil { - return "invalid" - } - return commons.ValidatorTopicID(pk)[0] - } - t.Log("subscribing to topics") // listen to topics for _, pk := range pks { @@ -85,7 +118,7 @@ func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, wg.Add(1) go func(p *P, pk string, pi int) { defer wg.Done() - msg, err := dummyMsg(pk, pi%4) + msg, err := dummyMsg(pk, pi%4, false) require.NoError(t, err) raw, err := msg.Encode() require.NoError(t, err) @@ -146,6 +179,109 @@ func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, wg.Wait() } +func banningTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, pks []string, scoreMap map[peer.ID]*pubsub.PeerScoreSnapshot, scoreMapMu *sync.Mutex) { + t.Log("subscribing to topics") + + for _, pk := range pks { + for _, p := range peers { + require.NoError(t, p.tm.Subscribe(logger, validatorTopic(pk))) + } + } + + // wait for the peers to join topics + <-time.After(3 * time.Second) + + t.Log("checking initial scores") + for _, pk := range pks { + for _, p := range peers { + peerList, err := p.tm.Peers(pk) + require.NoError(t, err) + + for _, pid := range peerList { + scoreMapMu.Lock() + v, ok := scoreMap[pid] + scoreMapMu.Unlock() + + require.True(t, ok) + require.Equal(t, 0, v.Score) + } + } + } + + t.Log("broadcasting messages") + + const invalidMessagesCount = 10 + + // TODO: get current default score, send an invalid rejected message, check the score; then run 10 of them and check the score; then check valid message + + invalidMessages, err := msgSequence(pks[0], invalidMessagesCount, len(pks), true) + require.NoError(t, err) + + var wg sync.WaitGroup + // publish some messages + for i, msg := range invalidMessages { + wg.Add(1) + go func(p *P, pk string, msg *spectypes.SSVMessage) { + defer wg.Done() + + raw, err := msg.Encode() + require.NoError(t, err) + + require.NoError(t, p.tm.Broadcast(validatorTopic(pk), raw, time.Second*10)) + + <-time.After(time.Second * 5) + }(peers[0], pks[i%len(pks)], msg) + } + wg.Wait() + + <-time.After(5 * time.Second) + + t.Log("checking final scores") + for _, pk := range pks { + for _, p := range peers { + peerList, err := p.tm.Peers(pk) + require.NoError(t, err) + + for _, pid := range peerList 
{ + scoreMapMu.Lock() + v, ok := scoreMap[pid] + scoreMapMu.Unlock() + + require.True(t, ok) + require.Equal(t, 0, v.Score) // TODO: score should change + } + } + } + + //t.Log("unsubscribing") + //// unsubscribing multiple times for each topic + //wg.Add(1) + //go func(p *P, pk string) { + // defer wg.Done() + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // go func(p *P) { + // <-time.After(time.Millisecond) + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // }(p) + // wg.Add(1) + // go func(p *P) { + // defer wg.Done() + // <-time.After(time.Millisecond * 50) + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // }(p) + //}(peer, pk) + // + //wg.Wait() +} + +func validatorTopic(pkhex string) string { + pk, err := hex.DecodeString(pkhex) + if err != nil { + return "invalid" + } + return commons.ValidatorTopicID(pk)[0] +} + type P struct { host host.Host ps *pubsub.PubSub @@ -181,10 +317,10 @@ func (p *P) saveMsg(t string, msg *pubsub.Message) { } // TODO: use p2p/testing -func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgValidator, msgID bool) []*P { +func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgValidator validation.MessageValidator, msgID bool, scoreInspector pubsub.ExtendedPeerScoreInspectFn) []*P { peers := make([]*P, n) for i := 0; i < n; i++ { - peers[i] = newPeer(ctx, logger, t, msgValidator, msgID) + peers[i] = newPeer(ctx, logger, t, msgValidator, msgID, scoreInspector) } t.Logf("%d peers were created", n) th := uint64(n/2) + uint64(n/4) @@ -203,7 +339,7 @@ func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgV return peers } -func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator, msgID bool) *P { +func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator validation.MessageValidator, msgID bool, scoreInspector pubsub.ExtendedPeerScoreInspectFn) *P { h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0")) require.NoError(t, err) ds, err := discovery.NewLocalDiscovery(ctx, logger, h) @@ -215,11 +351,11 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator midHandler = NewMsgIDHandler(ctx, 2*time.Minute) go midHandler.Start() } - cfg := &PububConfig{ + cfg := &PubSubConfig{ Host: h, TraceLog: false, MsgIDHandler: midHandler, - MsgHandler: func(topic string, msg *pubsub.Message) error { + MsgHandler: func(_ context.Context, topic string, msg *pubsub.Message) error { p.saveMsg(topic, msg) return nil }, @@ -228,15 +364,13 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator IPColocationWeight: 0, OneEpochDuration: time.Minute, }, + MsgValidator: msgValidator, + ScoreInspector: scoreInspector, + ScoreInspectorInterval: 100 * time.Millisecond, // TODO: add mock for peers.ScoreIndex } - // - if msgValidator { - cfg.MsgValidatorFactory = func(s string) MsgValidatorFunc { - return NewSSVMsgValidator() - } - } - ps, tm, err := NewPubsub(ctx, logger, cfg) + + ps, tm, err := NewPubSub(ctx, logger, cfg) require.NoError(t, err) p = &P{ @@ -258,28 +392,63 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator return p } -func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { +func msgSequence(pkHex string, n, committeeSize int, malformed bool) ([]*spectypes.SSVMessage, error) { + var messages []*spectypes.SSVMessage + + for i := 0; i < n; i++ { + height 
:= i * committeeSize + msg, err := dummyMsg(pkHex, height, malformed) + if err != nil { + return nil, err + } + + messages = append(messages, msg) + } + + return messages, nil +} + +func dummyMsg(pkHex string, height int, malformed bool) (*spectypes.SSVMessage, error) { pk, err := hex.DecodeString(pkHex) if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) - msgData := fmt.Sprintf(`{ - "message": { - "type": 3, - "round": 2, - "identifier": "%s", - "height": %d, - "value": "bk0iAAAAAAACAAAAAAAAAAbYXFSt2H7SQd5q5u+N0bp6PbbPTQjU25H1QnkbzTECahIBAAAAAADmi+NJfvXZ3iXp2cfs0vYVW+EgGD7DTTvr5EkLtiWq8WsSAQAAAAAAIC8dZTEdD3EvE38B9kDVWkSLy40j0T+TtSrrrBqVjo4=" - }, - "signature": "sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN", - "signer_ids": [1,3,4] - }`, id, height) - return &spectypes.SSVMessage{ + + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + signature, err := base64.StdEncoding.DecodeString("sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN") + if err != nil { + return nil, err + } + + signedMessage := specqbft.SignedMessage{ + Signature: signature, + Signers: []spectypes.OperatorID{1, 3, 4}, + Message: specqbft.Message{ + MsgType: specqbft.RoundChangeMsgType, + Height: specqbft.Height(height), + Round: 2, + Identifier: id[:], + Root: [32]byte{}, + }, + FullData: nil, + } + + msgData, err := signedMessage.Encode() + if err != nil { + return nil, err + } + + ssvMsg := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, - Data: []byte(msgData), - }, nil + Data: msgData, + } + + if malformed { + ssvMsg.MsgType = math.MaxUint64 + } + + return ssvMsg, nil } // diff --git a/network/topics/metrics.go b/network/topics/metrics.go index 53c651967e..7df570090a 100644 --- a/network/topics/metrics.go +++ b/network/topics/metrics.go @@ -6,15 +6,12 @@ import ( "go.uber.org/zap" ) +// TODO: replace with new metrics var ( metricPubsubTrace = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:network:pubsub:trace", Help: "Traces of pubsub messages", }, []string{"type"}) - metricPubsubMsgValidationResults = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "ssv:network:pubsub:msg:validation", - Help: "Traces of pubsub message validation results", - }, []string{"type"}) metricPubsubOutbound = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:p2p:pubsub:msg:out", Help: "Count broadcasted messages", @@ -23,10 +20,6 @@ var ( Name: "ssv:p2p:pubsub:msg:in", Help: "Count incoming messages", }, []string{"topic", "msg_type"}) - metricPubsubActiveMsgValidation = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ssv:p2p:pubsub:msg:val:active", - Help: "Count active message validation", - }, []string{"topic"}) metricPubsubPeerScoreInspect = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "ssv:p2p:pubsub:score:inspect", Help: "Gauge for negative peer scores", @@ -38,30 +31,13 @@ func init() { if err := prometheus.Register(metricPubsubTrace); err != nil { logger.Debug("could not register prometheus collector") } - if err := prometheus.Register(metricPubsubMsgValidationResults); err != nil { - logger.Debug("could not register prometheus collector") - } if err := prometheus.Register(metricPubsubOutbound); err != nil { logger.Debug("could not register prometheus collector") } if err := 
prometheus.Register(metricPubsubInbound); err != nil { logger.Debug("could not register prometheus collector") } - if err := prometheus.Register(metricPubsubActiveMsgValidation); err != nil { - logger.Debug("could not register prometheus collector") - } if err := prometheus.Register(metricPubsubPeerScoreInspect); err != nil { logger.Debug("could not register prometheus collector") } } - -type msgValidationResult string - -var ( - validationResultNoData msgValidationResult = "no_data" - validationResultEncoding msgValidationResult = "encoding" -) - -func reportValidationResult(result msgValidationResult) { - metricPubsubMsgValidationResults.WithLabelValues(string(result)).Inc() -} diff --git a/network/topics/msg_validator.go b/network/topics/msg_validator.go deleted file mode 100644 index f1329fa698..0000000000 --- a/network/topics/msg_validator.go +++ /dev/null @@ -1,67 +0,0 @@ -package topics - -import ( - "context" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/bloxapp/ssv/network/commons" -) - -// MsgValidatorFunc represents a message validator -type MsgValidatorFunc = func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult - -// NewSSVMsgValidator creates a new msg validator that validates message structure, -// and checks that the message was sent on the right topic. -// TODO: enable post SSZ change, remove logs, break into smaller validators? -func NewSSVMsgValidator() func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - return func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { - topic := pmsg.GetTopic() - metricPubsubActiveMsgValidation.WithLabelValues(topic).Inc() - defer metricPubsubActiveMsgValidation.WithLabelValues(topic).Dec() - if len(pmsg.GetData()) == 0 { - reportValidationResult(validationResultNoData) - return pubsub.ValidationReject - } - msg, err := commons.DecodeNetworkMsg(pmsg.GetData()) - if err != nil { - // can't decode message - // logger.Debug("invalid: can't decode message", zap.Error(err)) - reportValidationResult(validationResultEncoding) - return pubsub.ValidationReject - } - if msg == nil { - reportValidationResult(validationResultEncoding) - return pubsub.ValidationReject - } - pmsg.ValidatorData = *msg - return pubsub.ValidationAccept - - // Check if the message was sent on the right topic. 
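The inline validator deleted here only rejected empty or undecodable payloads; richer validation now lives behind validation.MessageValidator, which the topic controller registers through ValidatorForTopic. A hedged sketch of invoking it directly, using only constructors and methods that appear elsewhere in this patch:

package example

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"

	"github.com/bloxapp/ssv/message/validation"
	"github.com/bloxapp/ssv/networkconfig"
)

// validateOne mirrors what the registered pubsub topic validator does for
// each incoming message: hand it to the central message validator.
func validateOne(pmsg *pubsub.Message) pubsub.ValidationResult {
	mv := validation.NewMessageValidator(networkconfig.TestNetwork)
	return mv.ValidatePubsubMessage(context.Background(), pmsg.ReceivedFrom, pmsg)
}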
- // currentTopic := pmsg.GetTopic() - // currentTopicBaseName := fork.GetTopicBaseName(currentTopic) - // topics := fork.ValidatorTopicID(msg.GetID().GetPubKey()) - // for _, tp := range topics { - // if tp == currentTopicBaseName { - // reportValidationResult(validationResultValid) - // return pubsub.ValidationAccept - // } - //} - // reportValidationResult(validationResultTopic) - // return pubsub.ValidationReject - } -} - -//// CombineMsgValidators executes multiple validators -// func CombineMsgValidators(validators ...MsgValidatorFunc) MsgValidatorFunc { -// return func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { -// res := pubsub.ValidationAccept -// for _, v := range validators { -// if res = v(ctx, p, msg); res == pubsub.ValidationReject { -// break -// } -// } -// return res -// } -//} diff --git a/network/topics/msg_validator_test.go b/network/topics/msg_validator_test.go index 3a4f6b2081..dd66fb8312 100644 --- a/network/topics/msg_validator_test.go +++ b/network/topics/msg_validator_test.go @@ -2,44 +2,69 @@ package topics import ( "context" - "encoding/hex" - "fmt" "testing" + v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/herumi/bls-eth-go-binary/bls" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" pubsub "github.com/libp2p/go-libp2p-pubsub" ps_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network/commons" - "github.com/bloxapp/ssv/protocol/v2/types" - "github.com/bloxapp/ssv/utils/threshold" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/storage" + beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" ) func TestMsgValidator(t *testing.T) { - pks := createSharePublicKeys(4) - mv := NewSSVMsgValidator() + logger := zaptest.NewLogger(t) + db, err := kv.NewInMemory(logger, basedb.Options{}) + require.NoError(t, err) + + ns, err := storage.NewNodeStorage(logger, db) + require.NoError(t, err) + + ks := spectestingutils.Testing4SharesSet() + share := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: v1.ValidatorStateActiveOngoing, + }, + Liquidated: false, + }, + } + require.NoError(t, ns.Shares().Save(nil, share)) + + mv := validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithShareStorage(ns.Shares())) require.NotNil(t, mv) + slot := networkconfig.TestNetwork.Beacon.GetBeaconNetwork().EstimatedCurrentSlot() + t.Run("valid consensus msg", func(t *testing.T) { - pkHex := pks[0] - msg, err := dummySSVConsensusMsg(pkHex, 15160) + msg, err := dummySSVConsensusMsg(share.ValidatorPubKey, qbft.Height(slot)) require.NoError(t, err) + raw, err := msg.Encode() require.NoError(t, err) - pk, err := hex.DecodeString(pkHex) - require.NoError(t, err) - topics := commons.ValidatorTopicID(pk) + + topics := commons.ValidatorTopicID(share.ValidatorPubKey) pmsg := newPBMsg(raw, commons.GetTopicFullName(topics[0]), []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r")) - res := mv(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) - require.Equal(t, res, 
pubsub.ValidationAccept) + res := mv.ValidatePubsubMessage(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) + require.Equal(t, pubsub.ValidationAccept, res) }) // TODO: enable once topic validation is in place - // t.Run("wrong topic", func(t *testing.T) { + //t.Run("wrong topic", func(t *testing.T) { // pkHex := "b5de683dbcb3febe8320cc741948b9282d59b75a6970ed55d6f389da59f26325331b7ea0e71a2552373d0debb6048b8a" - // msg, err := dummySSVConsensusMsg(pkHex, 15160) + // msg, err := dummySSVConsensusMsg(share.ValidatorPubKey, 15160) // require.NoError(t, err) // raw, err := msg.Encode() // require.NoError(t, err) @@ -47,40 +72,26 @@ func TestMsgValidator(t *testing.T) { // require.NoError(t, err) // topics := commons.ValidatorTopicID(pk) // pmsg := newPBMsg(raw, topics[0], []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r")) - // res := mv(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) + // res := mv.ValidateP2PMessage(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) // require.Equal(t, res, pubsub.ValidationReject) - // }) + //}) t.Run("empty message", func(t *testing.T) { pmsg := newPBMsg([]byte{}, "xxx", []byte{}) - res := mv(context.Background(), "xxxx", pmsg) - require.Equal(t, res, pubsub.ValidationReject) + res := mv.ValidatePubsubMessage(context.Background(), "xxxx", pmsg) + require.Equal(t, pubsub.ValidationReject, res) }) // TODO: enable once topic validation is in place - // t.Run("invalid validator public key", func(t *testing.T) { + //t.Run("invalid validator public key", func(t *testing.T) { // msg, err := dummySSVConsensusMsg("10101011", 1) // require.NoError(t, err) // raw, err := msg.Encode() // require.NoError(t, err) // pmsg := newPBMsg(raw, "xxx", []byte{}) - // res := mv(context.Background(), "xxxx", pmsg) + // res := mv.ValidateP2PMessage(context.Background(), "xxxx", pmsg) // require.Equal(t, res, pubsub.ValidationReject) - // }) - -} - -func createSharePublicKeys(n int) []string { - threshold.Init() - - var res []string - for i := 0; i < n; i++ { - sk := bls.SecretKey{} - sk.SetByCSPRNG() - pk := sk.GetPublicKey().SerializeToHexStr() - res = append(res, pk) - } - return res + //}) } func newPBMsg(data []byte, topic string, from []byte) *pubsub.Message { @@ -93,26 +104,19 @@ func newPBMsg(data []byte, topic string, from []byte) *pubsub.Message { return pmsg } -func dummySSVConsensusMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { - pk, err := hex.DecodeString(pkHex) +func dummySSVConsensusMsg(pk spectypes.ValidatorPK, height qbft.Height) (*spectypes.SSVMessage, error) { + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + ks := spectestingutils.Testing4SharesSet() + validSignedMessage := spectestingutils.TestingRoundChangeMessageWithHeightAndIdentifier(ks.Shares[1], 1, height, id[:]) + + encodedSignedMessage, err := validSignedMessage.Encode() if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) - msgData := fmt.Sprintf(`{ - "message": { - "type": 3, - "round": 2, - "identifier": "%s", - "height": %d, - "value": "bk0iAAAAAAACAAAAAAAAAAbYXFSt2H7SQd5q5u+N0bp6PbbPTQjU25H1QnkbzTECahIBAAAAAADmi+NJfvXZ3iXp2cfs0vYVW+EgGD7DTTvr5EkLtiWq8WsSAQAAAAAAIC8dZTEdD3EvE38B9kDVWkSLy40j0T+TtSrrrBqVjo4=" - }, - "signature": "sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN", - 
"signer_ids": [1,3,4] - }`, id, height) + return &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, - Data: []byte(msgData), + Data: encodedSignedMessage, }, nil } diff --git a/network/topics/params/gossipsub.go b/network/topics/params/gossipsub.go index 5e7945768d..c7d51ba8a1 100644 --- a/network/topics/params/gossipsub.go +++ b/network/topics/params/gossipsub.go @@ -6,7 +6,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" ) -var ( +const ( // gsD topic stable mesh target count gsD = 8 // gsDlo topic stable mesh low watermark diff --git a/network/topics/params/topic_score.go b/network/topics/params/topic_score.go index b7b19fc8ef..a7b0942f34 100644 --- a/network/topics/params/topic_score.go +++ b/network/topics/params/topic_score.go @@ -22,7 +22,8 @@ const ( // using value of 50 (prysm changed to 90) dampeningFactor = 50 - subnetTopicsWeight = 4.0 + subnetTopicsWeight = 4.0 + invalidMeshDeliveriesWeight = -800 ) const ( @@ -167,7 +168,7 @@ func TopicParams(opts Options) (*pubsub.TopicScoreParams, error) { } if opts.Topic.InvalidMsgDecayTime > 0 { - params.InvalidMessageDeliveriesWeight = -opts.maxScore() / opts.Topic.TopicWeight + params.InvalidMessageDeliveriesWeight = invalidMeshDeliveriesWeight params.InvalidMessageDeliveriesDecay = scoreDecay(opts.Topic.InvalidMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) } else { params.InvalidMessageDeliveriesDecay = 0.1 diff --git a/network/topics/pubsub.go b/network/topics/pubsub.go index b4b67b4833..2422422e2b 100644 --- a/network/topics/pubsub.go +++ b/network/topics/pubsub.go @@ -26,7 +26,7 @@ const ( ) // the following are kept in vars to allow flexibility (e.g. in tests) -var ( +const ( // validationQueueSize is the size that we assign to the validation queue validationQueueSize = 512 // outboundQueueSize is the size that we assign to the outbound message queue @@ -34,32 +34,34 @@ var ( // validateThrottle is the amount of goroutines used for pubsub msg validation validateThrottle = 8192 // scoreInspectInterval is the interval for performing score inspect, which goes over all peers scores - scoreInspectInterval = time.Minute + defaultScoreInspectInterval = time.Minute // msgIDCacheTTL specifies how long a message ID will be remembered as seen, 6.4m (as ETH 2.0) msgIDCacheTTL = params.HeartbeatInterval * 550 ) -// PububConfig is the needed config to instantiate pubsub -type PububConfig struct { +// PubSubConfig is the needed config to instantiate pubsub +type PubSubConfig struct { Host host.Host TraceLog bool StaticPeers []peer.AddrInfo MsgHandler PubsubMessageHandler - // MsgValidatorFactory accepts the topic name and returns the corresponding msg validator + // MsgValidator accepts the topic name and returns the corresponding msg validator // in case we need different validators for specific topics, // this should be the place to map a validator to topic - MsgValidatorFactory func(string) MsgValidatorFunc - ScoreIndex peers.ScoreIndex - Scoring *ScoringConfig - MsgIDHandler MsgIDHandler - Discovery discovery.Discovery + MsgValidator messageValidator + ScoreIndex peers.ScoreIndex + Scoring *ScoringConfig + MsgIDHandler MsgIDHandler + Discovery discovery.Discovery ValidateThrottle int ValidationQueueSize int OutboundQueueSize int MsgIDCacheTTL time.Duration - GetValidatorStats network.GetValidatorStats + GetValidatorStats network.GetValidatorStats + ScoreInspector pubsub.ExtendedPeerScoreInspectFn + ScoreInspectorInterval time.Duration } // ScoringConfig is the configuration for peer 
scoring @@ -76,7 +78,7 @@ type PubsubBundle struct { Resolver MsgPeersResolver } -func (cfg *PububConfig) init() error { +func (cfg *PubSubConfig) init() error { if cfg.Host == nil { return errors.New("bad args: missing host") } @@ -96,14 +98,14 @@ func (cfg *PububConfig) init() error { } // initScoring initializes scoring config -func (cfg *PububConfig) initScoring() { +func (cfg *PubSubConfig) initScoring() { if cfg.Scoring == nil { cfg.Scoring = DefaultScoringConfig() } } -// NewPubsub creates a new pubsub router and the necessary components -func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubsub.PubSub, Controller, error) { +// NewPubSub creates a new pubsub router and the necessary components +func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig) (*pubsub.PubSub, Controller, error) { if err := cfg.init(); err != nil { return nil, nil, err } @@ -133,12 +135,23 @@ func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubs } var topicScoreFactory func(string) *pubsub.TopicScoreParams - if cfg.ScoreIndex != nil { + + inspector := cfg.ScoreInspector + inspectInterval := cfg.ScoreInspectorInterval + if cfg.ScoreIndex != nil || inspector != nil { cfg.initScoring() - inspector := scoreInspector(logger, cfg.ScoreIndex) + + if inspector == nil { + inspector = scoreInspector(logger, cfg.ScoreIndex) + } + + if inspectInterval == 0 { + inspectInterval = defaultScoreInspectInterval + } + peerScoreParams := params.PeerScoreParams(cfg.Scoring.OneEpochDuration, cfg.MsgIDCacheTTL, cfg.Scoring.IPColocationWeight, 0, cfg.Scoring.IPWhilelist...) psOpts = append(psOpts, pubsub.WithPeerScore(peerScoreParams, params.PeerScoreThresholds()), - pubsub.WithPeerScoreInspect(inspector, scoreInspectInterval)) + pubsub.WithPeerScoreInspect(inspector, inspectInterval)) async.Interval(ctx, time.Hour, func() { // reset peer scores metric every hour because it has a label for peer ID which can grow infinitely metricPubsubPeerScoreInspect.Reset() @@ -169,7 +182,7 @@ func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubs return nil, nil, err } - ctrl := NewTopicsController(ctx, logger, cfg.MsgHandler, cfg.MsgValidatorFactory, sf, ps, topicScoreFactory) + ctrl := NewTopicsController(ctx, logger, cfg.MsgHandler, cfg.MsgValidator, sf, ps, topicScoreFactory) return ps, ctrl, nil } diff --git a/network/topics/scoring.go b/network/topics/scoring.go index ee0360364a..9e47514262 100644 --- a/network/topics/scoring.go +++ b/network/topics/scoring.go @@ -54,7 +54,7 @@ func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex) pubsub.Extend } // topicScoreParams factory for creating scoring params for topics -func topicScoreParams(logger *zap.Logger, cfg *PububConfig) func(string) *pubsub.TopicScoreParams { +func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig) func(string) *pubsub.TopicScoreParams { return func(t string) *pubsub.TopicScoreParams { totalValidators, activeValidators, myValidators, err := cfg.GetValidatorStats() if err != nil { diff --git a/operator/duties/attester.go b/operator/duties/attester.go index 6af6f4abd1..f5ab6b4b0a 100644 --- a/operator/duties/attester.go +++ b/operator/duties/attester.go @@ -11,19 +11,20 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) type AttesterHandler struct { baseHandler - duties *Duties[*eth2apiv1.AttesterDuty] + duties *dutystore.Duties[eth2apiv1.AttesterDuty] fetchCurrentEpoch bool fetchNextEpoch 
bool
 }
 
-func NewAttesterHandler() *AttesterHandler {
+func NewAttesterHandler(duties *dutystore.Duties[eth2apiv1.AttesterDuty]) *AttesterHandler {
 	h := &AttesterHandler{
-		duties: NewDuties[*eth2apiv1.AttesterDuty](),
+		duties: duties,
 	}
 	h.fetchCurrentEpoch = true
 	h.fetchFirst = true
@@ -52,7 +53,7 @@ func (h *AttesterHandler) Name() string {
 //
 // On Indices Change:
 // 1. Execute duties.
-// 2. Reset duties for the current epoch.
+// 2. Reset the duty store for the current epoch.
 // 3. Fetch duties for the current epoch.
 // 4. If necessary, fetch duties for the next epoch.
 //
@@ -82,7 +83,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) {
 		} else {
 			h.processExecution(currentEpoch, slot)
 			if h.indicesChanged {
-				h.duties.Reset(currentEpoch)
+				h.duties.ResetEpoch(currentEpoch)
 				h.indicesChanged = false
 			}
 			h.processFetching(ctx, currentEpoch, slot)
@@ -98,7 +99,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) {
 
 			// last slot of epoch
 			if uint64(slot)%slotsPerEpoch == slotsPerEpoch-1 {
-				h.duties.Reset(currentEpoch)
+				h.duties.ResetEpoch(currentEpoch)
 			}
 
 		case reorgEvent := <-h.reorg:
@@ -108,18 +109,18 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) {
 
 			// reset current epoch duties
 			if reorgEvent.Previous {
-				h.duties.Reset(currentEpoch)
+				h.duties.ResetEpoch(currentEpoch)
 				h.fetchFirst = true
 				h.fetchCurrentEpoch = true
 				if h.shouldFetchNexEpoch(reorgEvent.Slot) {
-					h.duties.Reset(currentEpoch + 1)
+					h.duties.ResetEpoch(currentEpoch + 1)
 					h.fetchNextEpoch = true
 				}
 			} else if reorgEvent.Current {
 				// reset & re-fetch next epoch duties if in appropriate slot range,
 				// otherwise they will be fetched by the appropriate slot tick.
 				if h.shouldFetchNexEpoch(reorgEvent.Slot) {
-					h.duties.Reset(currentEpoch + 1)
+					h.duties.ResetEpoch(currentEpoch + 1)
 					h.fetchNextEpoch = true
 				}
 			}
@@ -135,7 +136,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) {
 
 			// reset next epoch duties if in appropriate slot range
 			if h.shouldFetchNexEpoch(slot) {
-				h.duties.Reset(currentEpoch + 1)
+				h.duties.ResetEpoch(currentEpoch + 1)
 				h.fetchNextEpoch = true
 			}
 		}
@@ -164,24 +165,26 @@ func (h *AttesterHandler) processFetching(ctx context.Context, epoch phase0.Epoc
 }
 
 func (h *AttesterHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) {
+	duties := h.duties.CommitteeSlotDuties(epoch, slot)
+	if duties == nil {
+		return
+	}
+
 	// range over duties and execute
-	if slotMap, ok := h.duties.m[epoch]; ok {
-		if duties, ok := slotMap[slot]; ok {
-			toExecute := make([]*spectypes.Duty, 0, len(duties)*2)
-			for _, d := range duties {
-				if h.shouldExecute(d) {
-					toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester))
-					toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator))
-				}
-			}
-			h.executeDuties(h.logger, toExecute)
+	toExecute := make([]*spectypes.Duty, 0, len(duties)*2)
+	for _, d := range duties {
+		if h.shouldExecute(d) {
+			toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester))
+			toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator))
 		}
 	}
+
+	h.executeDuties(h.logger, toExecute)
 }
 
 func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error {
 	start := time.Now()
-	indices := h.validatorController.ActiveValidatorIndices(epoch)
+	indices := h.validatorController.CommitteeActiveIndices(epoch)
 
 	if len(indices) == 0 {
 		return nil
@@ -194,7 +197,7 @@ func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase
 	specDuties := make([]*spectypes.Duty, 0, len(duties)) 
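processExecution and fetchAndProcessDuties now go through the shared duty store instead of a handler-private map. A condensed sketch of that round trip, assuming only the dutystore API introduced later in this patch:

package main

import (
	eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1"
	"github.com/attestantio/go-eth2-client/spec/phase0"

	"github.com/bloxapp/ssv/operator/duties/dutystore"
)

func main() {
	store := dutystore.NewDuties[eth2apiv1.AttesterDuty]()

	// Fetching: record each duty, flagging whether one of our own
	// committee validators owns it (inCommittee).
	d := &eth2apiv1.AttesterDuty{Slot: phase0.Slot(32), ValidatorIndex: phase0.ValidatorIndex(1)}
	store.Add(phase0.Epoch(1), d.Slot, d.ValidatorIndex, d, true)

	// Executing: at slot time, only in-committee duties come back.
	_ = store.CommitteeSlotDuties(phase0.Epoch(1), phase0.Slot(32))

	// Reorg or indices change: drop the whole epoch and re-fetch.
	store.ResetEpoch(phase0.Epoch(1))
}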
for _, d := range duties { - h.duties.Add(epoch, d.Slot, d) + h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, true) specDuties = append(specDuties, h.toSpecDuty(d, spectypes.BNRoleAttester)) } diff --git a/operator/duties/attester_test.go b/operator/duties/attester_test.go index e0927c1f0a..4292ddf395 100644 --- a/operator/duties/attester_test.go +++ b/operator/duties/attester_test.go @@ -5,50 +5,52 @@ import ( "testing" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" ) -func setupAttesterDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*v1.AttesterDuty]) (chan struct{}, chan []*spectypes.Duty) { +func setupAttesterDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.AttesterDuty]) (chan struct{}, chan []*spectypes.Duty) { fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.Duty) s.beaconNode.(*mocks.MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.AttesterDuty, error) { + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.AttesterDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getIndices := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - duties, _ := dutiesMap.Get(epoch) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return fetchDutiesCall, executeDutiesCall } -func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*v1.AttesterDuty) []*spectypes.Duty { +func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*eth2apiv1.AttesterDuty) []*spectypes.Duty { expectedDuties := make([]*spectypes.Duty, 0) for _, d := range duties { expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleAttester)) @@ -59,15 +61,15 @@ func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*v1.Attes func TestScheduler_Attester_Same_Slot(t 
*testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(1)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(1), @@ -95,15 +97,15 @@ func TestScheduler_Attester_Same_Slot(t *testing.T) { func TestScheduler_Attester_Diff_Slots(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -136,9 +138,9 @@ func TestScheduler_Attester_Diff_Slots(t *testing.T) { func TestScheduler_Attester_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -152,7 +154,7 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { scheduler.indicesChg <- struct{}{} // no execution should happen in slot 0 waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(0), @@ -180,7 +182,7 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { // STEP 4: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(2)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[2]}) + expected := expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -193,9 +195,9 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -213,7 +215,7 @@ 
func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	// STEP 3: trigger a change in active indices
 	scheduler.indicesChg <- struct{}{}
 	duties, _ := dutiesMap.Get(phase0.Epoch(0))
-	dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.AttesterDuty{
 		PubKey:         phase0.BLSPubKey{1, 2, 3},
 		Slot:           phase0.Slot(3),
 		ValidatorIndex: phase0.ValidatorIndex(1),
@@ -223,7 +225,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	// STEP 4: trigger a change in active indices in the same slot
 	scheduler.indicesChg <- struct{}{}
 	duties, _ = dutiesMap.Get(phase0.Epoch(0))
-	dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.AttesterDuty{
 		PubKey:         phase0.BLSPubKey{1, 2, 4},
 		Slot:           phase0.Slot(4),
 		ValidatorIndex: phase0.ValidatorIndex(2),
@@ -238,7 +240,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	// STEP 6: wait for attester duties to be executed
 	currentSlot.SetSlot(phase0.Slot(3))
 	duties, _ = dutiesMap.Get(phase0.Epoch(0))
-	expected := expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[0]})
+	expected := expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[0]})
 	setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	mockTicker.Send(currentSlot.GetSlot())
@@ -247,7 +249,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	// STEP 7: wait for attester duties to be executed
 	currentSlot.SetSlot(phase0.Slot(4))
 	duties, _ = dutiesMap.Get(phase0.Epoch(0))
-	expected = expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[1]})
+	expected = expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[1]})
 	setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	mockTicker.Send(currentSlot.GetSlot())
@@ -261,15 +263,15 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 // reorg previous dependent root changed
 func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(63))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(66),
@@ -282,8 +284,8 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) {
 	waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 2: trigger head event
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                      currentSlot.GetSlot(),
 			CurrentDutyDependentRoot:  phase0.Root{0x01},
 			PreviousDutyDependentRoot: phase0.Root{0x01},
@@ -298,13 +300,13 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) {
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 4: trigger reorg on epoch transition
-	e = &v1.Event{
-		Data: &v1.HeadEvent{
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                      currentSlot.GetSlot(),
 			PreviousDutyDependentRoot: phase0.Root{0x02},
 		},
 	}
-	dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(67),
@@ -341,15 +343,15 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) {
 // reorg previous dependent root changed and the indices changed as well
 func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(63))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(66),
@@ -363,8 +365,8 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 2: trigger head event
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                      currentSlot.GetSlot(),
 			CurrentDutyDependentRoot:  phase0.Root{0x01},
 			PreviousDutyDependentRoot: phase0.Root{0x01},
@@ -379,13 +381,13 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 4: trigger reorg on epoch transition
-	e = &v1.Event{
-		Data: &v1.HeadEvent{
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                      currentSlot.GetSlot(),
 			PreviousDutyDependentRoot: phase0.Root{0x02},
 		},
 	}
-	dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(67),
@@ -398,7 +400,7 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t
 	// STEP 5: trigger indices change
 	scheduler.indicesChg <- struct{}{}
 	duties, _ := dutiesMap.Get(phase0.Epoch(2))
-	dutiesMap.Set(phase0.Epoch(2), append(duties, &v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), append(duties, &eth2apiv1.AttesterDuty{
 		PubKey:         phase0.BLSPubKey{1, 2, 4},
 		Slot:           phase0.Slot(67),
 		ValidatorIndex: phase0.ValidatorIndex(2),
@@ -432,15 +434,15 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t
 // reorg previous dependent root changed
 func TestScheduler_Attester_Reorg_Previous(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(32))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(35),
@@ -453,8 +455,8 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) {
 	waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 2: trigger head event
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                      currentSlot.GetSlot(),
 			PreviousDutyDependentRoot: phase0.Root{0x01},
 		},
@@ -468,13 +470,13 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) {
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 4: trigger reorg
-	e = &v1.Event{
-		Data: &v1.HeadEvent{
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                      currentSlot.GetSlot(),
 			PreviousDutyDependentRoot: phase0.Root{0x02},
 		},
 	}
-	dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(36),
@@ -511,15 +513,15 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) {
 // reorg previous dependent root changed and the indices changed the same slot
 func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(32))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(35),
@@ -532,8 +534,8 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T
 	waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 2: trigger head event
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                      currentSlot.GetSlot(),
 			PreviousDutyDependentRoot: phase0.Root{0x01},
 		},
@@ -547,13 +549,13 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 4: trigger reorg
-	e = &v1.Event{
-		Data: &v1.HeadEvent{
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                      currentSlot.GetSlot(),
 			PreviousDutyDependentRoot: phase0.Root{0x02},
 		},
 	}
-	dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(36),
@@ -566,7 +568,7 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T
 	// STEP 5: trigger indices change
 	scheduler.indicesChg <- struct{}{}
 	duties, _ := dutiesMap.Get(phase0.Epoch(1))
-	dutiesMap.Set(phase0.Epoch(1), append(duties, &v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(1), append(duties, &eth2apiv1.AttesterDuty{
 		PubKey:         phase0.BLSPubKey{1, 2, 4},
 		Slot:           phase0.Slot(36),
 		ValidatorIndex: phase0.ValidatorIndex(2),
@@ -600,15 +602,15 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T
 // reorg current dependent root changed
 func TestScheduler_Attester_Reorg_Current(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(47))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(64),
@@ -621,8 +623,8 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) {
 	waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 2: trigger head event
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                     currentSlot.GetSlot(),
 			CurrentDutyDependentRoot: phase0.Root{0x01},
 		},
@@ -636,13 +638,13 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) {
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 4: trigger reorg
-	e = &v1.Event{
-		Data: &v1.HeadEvent{
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                     currentSlot.GetSlot(),
 			CurrentDutyDependentRoot: phase0.Root{0x02},
 		},
 	}
-	dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(65),
@@ -687,15 +689,15 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) {
 // reorg current dependent root changed including indices change in the same slot
 func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(47))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(64),
@@ -708,8 +710,8 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) {
 	waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 2: trigger head event
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                     currentSlot.GetSlot(),
 			CurrentDutyDependentRoot: phase0.Root{0x01},
 		},
@@ -723,13 +725,13 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) {
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 4: trigger reorg
-	e = &v1.Event{
-		Data: &v1.HeadEvent{
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                     currentSlot.GetSlot(),
 			CurrentDutyDependentRoot: phase0.Root{0x02},
 		},
 	}
-	dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(65),
@@ -742,7 +744,7 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) {
 	// STEP 5: trigger indices change
 	scheduler.indicesChg <- struct{}{}
 	duties, _ := dutiesMap.Get(phase0.Epoch(2))
-	dutiesMap.Set(phase0.Epoch(2), append(duties, &v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(2), append(duties, &eth2apiv1.AttesterDuty{
 		PubKey:         phase0.BLSPubKey{1, 2, 4},
 		Slot:           phase0.Slot(65),
 		ValidatorIndex: phase0.ValidatorIndex(2),
@@ -783,15 +785,15 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) {
 
 func TestScheduler_Attester_Early_Block(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(0))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(2),
@@ -817,8 +819,8 @@ func TestScheduler_Attester_Early_Block(t *testing.T) {
 	setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	// STEP 4: trigger head event (block arrival)
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot: currentSlot.GetSlot(),
 		},
 	}
@@ -833,15 +835,15 @@ func TestScheduler_Attester_Early_Block(t *testing.T) {
 
 func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(31))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(32),
@@ -869,15 +871,15 @@ func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) {
 
 func TestScheduler_Attester_Fetch_Execute_Next_Epoch_Duty(t *testing.T) {
 	var (
-		handler     = NewAttesterHandler()
+		handler     = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(13))
 	scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(32),
diff --git a/operator/duties/base_handler.go b/operator/duties/base_handler.go
index 15303fef68..f2a3fe5722 100644
--- a/operator/duties/base_handler.go
+++ b/operator/duties/base_handler.go
@@ -56,24 +56,3 @@ func (h *baseHandler) Setup(
 	h.reorg = reorgEvents
 	h.indicesChange = indicesChange
 }
-
-type Duties[D any] struct {
-	m map[phase0.Epoch]map[phase0.Slot][]D
-}
-
-func NewDuties[D any]() *Duties[D] {
-	return &Duties[D]{
-		m: make(map[phase0.Epoch]map[phase0.Slot][]D),
-	}
-}
-
-func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, duty D) {
-	if _, ok := d.m[epoch]; !ok {
-		d.m[epoch] = make(map[phase0.Slot][]D)
-	}
-	d.m[epoch][slot] = 
append(d.m[epoch][slot], duty) -} - -func (d *Duties[D]) Reset(epoch phase0.Epoch) { - delete(d.m, epoch) -} diff --git a/operator/duties/dutystore/duties.go b/operator/duties/dutystore/duties.go new file mode 100644 index 0000000000..50fd0d7e22 --- /dev/null +++ b/operator/duties/dutystore/duties.go @@ -0,0 +1,97 @@ +package dutystore + +import ( + "sync" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +type Duty interface { + eth2apiv1.AttesterDuty | eth2apiv1.ProposerDuty | eth2apiv1.SyncCommitteeDuty +} + +type dutyDescriptor[D Duty] struct { + duty *D + inCommittee bool +} + +type Duties[D Duty] struct { + mu sync.RWMutex + m map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D] +} + +func NewDuties[D Duty]() *Duties[D] { + return &Duties[D]{ + m: make(map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]), + } +} + +func (d *Duties[D]) CommitteeSlotDuties(epoch phase0.Epoch, slot phase0.Slot) []*D { + d.mu.RLock() + defer d.mu.RUnlock() + + slotMap, ok := d.m[epoch] + if !ok { + return nil + } + + descriptorMap, ok := slotMap[slot] + if !ok { + return nil + } + + var duties []*D + for _, descriptor := range descriptorMap { + if descriptor.inCommittee { + duties = append(duties, descriptor.duty) + } + } + + return duties +} + +func (d *Duties[D]) ValidatorDuty(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex) *D { + d.mu.RLock() + defer d.mu.RUnlock() + + slotMap, ok := d.m[epoch] + if !ok { + return nil + } + + descriptorMap, ok := slotMap[slot] + if !ok { + return nil + } + + descriptor, ok := descriptorMap[validatorIndex] + if !ok { + return nil + } + + return descriptor.duty +} + +func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex, duty *D, inCommittee bool) { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.m[epoch]; !ok { + d.m[epoch] = make(map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]) + } + if _, ok := d.m[epoch][slot]; !ok { + d.m[epoch][slot] = make(map[phase0.ValidatorIndex]dutyDescriptor[D]) + } + d.m[epoch][slot][validatorIndex] = dutyDescriptor[D]{ + duty: duty, + inCommittee: inCommittee, + } +} + +func (d *Duties[D]) ResetEpoch(epoch phase0.Epoch) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.m, epoch) +} diff --git a/operator/duties/dutystore/store.go b/operator/duties/dutystore/store.go new file mode 100644 index 0000000000..53dbfaefcc --- /dev/null +++ b/operator/duties/dutystore/store.go @@ -0,0 +1,19 @@ +package dutystore + +import ( + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" +) + +type Store struct { + Attester *Duties[eth2apiv1.AttesterDuty] + Proposer *Duties[eth2apiv1.ProposerDuty] + SyncCommittee *SyncCommitteeDuties +} + +func New() *Store { + return &Store{ + Attester: NewDuties[eth2apiv1.AttesterDuty](), + Proposer: NewDuties[eth2apiv1.ProposerDuty](), + SyncCommittee: NewSyncCommitteeDuties(), + } +} diff --git a/operator/duties/dutystore/sync_committee.go b/operator/duties/dutystore/sync_committee.go new file mode 100644 index 0000000000..0ae13041c7 --- /dev/null +++ b/operator/duties/dutystore/sync_committee.go @@ -0,0 +1,76 @@ +package dutystore + +import ( + "sync" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +type SyncCommitteeDuties struct { + mu sync.RWMutex + m map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty] +} + 
+func NewSyncCommitteeDuties() *SyncCommitteeDuties { + return &SyncCommitteeDuties{ + m: make(map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]), + } +} + +func (d *SyncCommitteeDuties) CommitteePeriodDuties(period uint64) []*eth2apiv1.SyncCommitteeDuty { + d.mu.RLock() + defer d.mu.RUnlock() + + descriptorMap, ok := d.m[period] + if !ok { + return nil + } + + var duties []*eth2apiv1.SyncCommitteeDuty + for _, descriptor := range descriptorMap { + if descriptor.inCommittee { + duties = append(duties, descriptor.duty) + } + } + + return duties +} + +func (d *SyncCommitteeDuties) Duty(period uint64, validatorIndex phase0.ValidatorIndex) *eth2apiv1.SyncCommitteeDuty { + d.mu.RLock() + defer d.mu.RUnlock() + + duties, ok := d.m[period] + if !ok { + return nil + } + + descriptor, ok := duties[validatorIndex] + if !ok { + return nil + } + + return descriptor.duty +} + +func (d *SyncCommitteeDuties) Add(period uint64, validatorIndex phase0.ValidatorIndex, duty *eth2apiv1.SyncCommitteeDuty, inCommittee bool) { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.m[period]; !ok { + d.m[period] = make(map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]) + } + + d.m[period][validatorIndex] = dutyDescriptor[eth2apiv1.SyncCommitteeDuty]{ + duty: duty, + inCommittee: inCommittee, + } +} + +func (d *SyncCommitteeDuties) Reset(period uint64) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.m, period) +} diff --git a/operator/duties/mocks/scheduler.go b/operator/duties/mocks/scheduler.go index 00cd929622..de1a092c05 100644 --- a/operator/duties/mocks/scheduler.go +++ b/operator/duties/mocks/scheduler.go @@ -186,18 +186,18 @@ func (m *MockValidatorController) EXPECT() *MockValidatorControllerMockRecorder return m.recorder } -// ActiveValidatorIndices mocks base method. -func (m *MockValidatorController) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { +// AllActiveIndices mocks base method. +func (m *MockValidatorController) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveValidatorIndices", epoch) + ret := m.ctrl.Call(m, "AllActiveIndices", epoch) ret0, _ := ret[0].([]phase0.ValidatorIndex) return ret0 } -// ActiveValidatorIndices indicates an expected call of ActiveValidatorIndices. -func (mr *MockValidatorControllerMockRecorder) ActiveValidatorIndices(epoch interface{}) *gomock.Call { +// AllActiveIndices indicates an expected call of AllActiveIndices. +func (mr *MockValidatorControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveValidatorIndices", reflect.TypeOf((*MockValidatorController)(nil).ActiveValidatorIndices), epoch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).AllActiveIndices), epoch) } // GetOperatorShares mocks base method. @@ -213,3 +213,17 @@ func (mr *MockValidatorControllerMockRecorder) GetOperatorShares() *gomock.Call mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOperatorShares", reflect.TypeOf((*MockValidatorController)(nil).GetOperatorShares)) } + +// CommitteeActiveIndices mocks base method. 
+func (m *MockValidatorController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. +func (mr *MockValidatorControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).CommitteeActiveIndices), epoch) +} diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go index 60fde29186..ffb52d42e0 100644 --- a/operator/duties/proposer.go +++ b/operator/duties/proposer.go @@ -11,17 +11,18 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) type ProposerHandler struct { baseHandler - duties *Duties[*eth2apiv1.ProposerDuty] + duties *dutystore.Duties[eth2apiv1.ProposerDuty] } -func NewProposerHandler() *ProposerHandler { +func NewProposerHandler(duties *dutystore.Duties[eth2apiv1.ProposerDuty]) *ProposerHandler { return &ProposerHandler{ - duties: NewDuties[*eth2apiv1.ProposerDuty](), + duties: duties, baseHandler: baseHandler{ fetchFirst: true, }, @@ -44,7 +45,7 @@ func (h *ProposerHandler) Name() string { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current epoch. +// 2. Reset duties for the current epoch. // 3. Fetch duties for the current epoch. // // On Ticker event: @@ -71,7 +72,6 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { } else { h.processExecution(currentEpoch, slot) if h.indicesChanged { - h.duties.Reset(currentEpoch) h.indicesChanged = false h.processFetching(ctx, currentEpoch, slot) } @@ -79,7 +79,7 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { // last slot of epoch if uint64(slot)%h.network.Beacon.SlotsPerEpoch() == h.network.Beacon.SlotsPerEpoch()-1 { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch - 1) h.fetchFirst = true } @@ -90,7 +90,7 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { // reset current epoch duties if reorgEvent.Current { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.fetchFirst = true } @@ -116,36 +116,46 @@ func (h *ProposerHandler) processFetching(ctx context.Context, epoch phase0.Epoc } func (h *ProposerHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) { + duties := h.duties.CommitteeSlotDuties(epoch, slot) + if duties == nil { + return + } + // range over duties and execute - if slotMap, ok := h.duties.m[epoch]; ok { - if duties, ok := slotMap[slot]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)) - for _, d := range duties { - if h.shouldExecute(d) { - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer)) - } - } - h.executeDuties(h.logger, toExecute) + toExecute := make([]*spectypes.Duty, 0, len(duties)) + for _, d := range duties { + if h.shouldExecute(d) { + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer)) } } + h.executeDuties(h.logger, toExecute) } func (h *ProposerHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error { start := time.Now() - indices := h.validatorController.ActiveValidatorIndices(epoch) - if len(indices) == 0 { + allIndices := h.validatorController.AllActiveIndices(epoch) + if len(allIndices) == 0 { return nil } - duties, err
:= h.beaconNode.ProposerDuties(ctx, epoch, indices) + inCommitteeIndices := h.validatorController.CommitteeActiveIndices(epoch) + inCommitteeIndicesSet := map[phase0.ValidatorIndex]struct{}{} + for _, idx := range inCommitteeIndices { + inCommitteeIndicesSet[idx] = struct{}{} + } + + duties, err := h.beaconNode.ProposerDuties(ctx, epoch, allIndices) if err != nil { return fmt.Errorf("failed to fetch proposer duties: %w", err) } + h.duties.ResetEpoch(epoch) + specDuties := make([]*spectypes.Duty, 0, len(duties)) for _, d := range duties { - h.duties.Add(epoch, d.Slot, d) + _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] + h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, inCommitteeDuty) specDuties = append(specDuties, h.toSpecDuty(d, spectypes.BNRoleProposer)) } diff --git a/operator/duties/proposer_test.go b/operator/duties/proposer_test.go index 8df730b6d3..56860c3c0e 100644 --- a/operator/duties/proposer_test.go +++ b/operator/duties/proposer_test.go @@ -4,48 +4,50 @@ import ( "context" "testing" - v1 "github.com/attestantio/go-eth2-client/api/v1" + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" ) -func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*v1.ProposerDuty]) (chan struct{}, chan []*spectypes.Duty) { +func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.ProposerDuty]) (chan struct{}, chan []*spectypes.Duty) { fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.Duty) s.beaconNode.(*mocks.MockBeaconNode).EXPECT().ProposerDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.ProposerDuty, error) { + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.ProposerDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getIndices := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - duties, _ := dutiesMap.Get(epoch) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() return fetchDutiesCall, executeDutiesCall } -func expectedExecutedProposerDuties(handler *ProposerHandler, duties 
[]*v1.ProposerDuty) []*spectypes.Duty { +func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*eth2apiv1.ProposerDuty) []*spectypes.Duty { expectedDuties := make([]*spectypes.Duty, 0) for _, d := range duties { expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleProposer)) @@ -55,15 +57,15 @@ func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*v1.Propo func TestScheduler_Proposer_Same_Slot(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(0), @@ -87,15 +89,15 @@ func TestScheduler_Proposer_Same_Slot(t *testing.T) { func TestScheduler_Proposer_Diff_Slots(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -129,9 +131,9 @@ func TestScheduler_Proposer_Diff_Slots(t *testing.T) { // execute duty after two slots after the indices changed func TestScheduler_Proposer_Indices_Changed(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -148,7 +150,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { // STEP 3: trigger a change in active indices scheduler.indicesChg <- struct{}{} - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(1), @@ -178,7 +180,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { // STEP 4: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[2]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -191,15 +193,15 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { 
var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -215,7 +217,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { scheduler.indicesChg <- struct{}{} waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(3), ValidatorIndex: phase0.ValidatorIndex(2), @@ -225,7 +227,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { scheduler.indicesChg <- struct{}{} waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 5}, Slot: phase0.Slot(4), ValidatorIndex: phase0.ValidatorIndex(3), @@ -239,7 +241,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 5: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(2)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[0]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -248,7 +250,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 6: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[1]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -257,7 +259,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 7: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(4)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[2]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -271,15 +273,15 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // reorg current dependent root changed func TestScheduler_Proposer_Reorg_Current(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch,
[]*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(34)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -292,8 +294,8 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -307,13 +309,13 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(37), @@ -346,15 +348,15 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { // reorg current dependent root changed func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(34)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -367,8 +369,8 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -382,13 +384,13 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(37), @@ -401,7 +403,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 5: trigger a change in active indices in the same slot scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(1)) - dutiesMap.Set(phase0.Epoch(1), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot:
phase0.Slot(38), ValidatorIndex: phase0.ValidatorIndex(2), @@ -417,7 +419,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 7: The second assigned duty should be executed currentSlot.SetSlot(phase0.Slot(37)) duties, _ = dutiesMap.Get(phase0.Epoch(1)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[0]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -426,7 +428,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 8: The second assigned duty should be executed currentSlot.SetSlot(phase0.Slot(38)) duties, _ = dutiesMap.Get(phase0.Epoch(1)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[1]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) diff --git a/operator/duties/scheduler.go b/operator/duties/scheduler.go index cb1f5861c6..e53f29bfab 100644 --- a/operator/duties/scheduler.go +++ b/operator/duties/scheduler.go @@ -19,6 +19,7 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -45,7 +46,8 @@ type BeaconNode interface { // ValidatorController represents the component that controls validators via the scheduler type ValidatorController interface { - ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex GetOperatorShares() []*types.SSVShare } @@ -60,6 +62,7 @@ type SchedulerOptions struct { IndicesChg chan struct{} Ticker SlotTicker BuilderProposals bool + DutyStore *dutystore.Store } type Scheduler struct { @@ -86,6 +89,11 @@ type Scheduler struct { } func NewScheduler(opts *SchedulerOptions) *Scheduler { + dutyStore := opts.DutyStore + if dutyStore == nil { + dutyStore = dutystore.New() + } + s := &Scheduler{ beaconNode: opts.BeaconNode, network: opts.Network, @@ -97,9 +105,9 @@ func NewScheduler(opts *SchedulerOptions) *Scheduler { blockPropagateDelay: blockPropagationDelay, handlers: []dutyHandler{ - NewAttesterHandler(), - NewProposerHandler(), - NewSyncCommitteeHandler(), + NewAttesterHandler(dutyStore.Attester), + NewProposerHandler(dutyStore.Proposer), + NewSyncCommitteeHandler(dutyStore.SyncCommittee), }, ticker: make(chan phase0.Slot), diff --git a/operator/duties/synccommittee.go b/operator/duties/sync_committee.go similarity index 86% rename from operator/duties/synccommittee.go rename to operator/duties/sync_committee.go index 0569d7cbfd..7508c4012a 100644 --- a/operator/duties/synccommittee.go +++ b/operator/duties/sync_committee.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) // syncCommitteePreparationEpochs is the number of epochs ahead of the sync committee @@ -21,14 +22,14 @@ var syncCommitteePreparationEpochs = uint64(2) type SyncCommitteeHandler struct { baseHandler - duties *SyncCommitteeDuties + duties *dutystore.SyncCommitteeDuties fetchCurrentPeriod bool fetchNextPeriod bool } -func NewSyncCommitteeHandler() *SyncCommitteeHandler { +func 
NewSyncCommitteeHandler(duties *dutystore.SyncCommitteeDuties) *SyncCommitteeHandler { h := &SyncCommitteeHandler{ - duties: NewSyncCommitteeDuties(), + duties: duties, } h.fetchCurrentPeriod = true h.fetchFirst = true @@ -39,27 +40,6 @@ func (h *SyncCommitteeHandler) Name() string { return spectypes.BNRoleSyncCommittee.String() } -type SyncCommitteeDuties struct { - m map[uint64][]*eth2apiv1.SyncCommitteeDuty -} - -func NewSyncCommitteeDuties() *SyncCommitteeDuties { - return &SyncCommitteeDuties{ - m: make(map[uint64][]*eth2apiv1.SyncCommitteeDuty), - } -} - -func (d *SyncCommitteeDuties) Add(period uint64, duty *eth2apiv1.SyncCommitteeDuty) { - if _, ok := d.m[period]; !ok { - d.m[period] = []*eth2apiv1.SyncCommitteeDuty{} - } - d.m[period] = append(d.m[period], duty) -} - -func (d *SyncCommitteeDuties) Reset(period uint64) { - delete(d.m, period) -} - // HandleDuties manages the duty lifecycle, handling different cases: // // On First Run: @@ -73,7 +53,7 @@ func (d *SyncCommitteeDuties) Reset(period uint64) { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current period. +// 2. Reset duties for the current period. // 3. Fetch duties for the current period. // 4. If necessary, fetch duties for the next period. // @@ -100,15 +80,10 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { if h.fetchFirst { h.fetchFirst = false - h.indicesChanged = false h.processFetching(ctx, period, slot) h.processExecution(period, slot) } else { h.processExecution(period, slot) - if h.indicesChanged { - h.duties.Reset(period) - h.indicesChanged = false - } h.processFetching(ctx, period, slot) } @@ -123,7 +98,7 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { // last slot of period if slot == h.network.Beacon.LastSlotOfSyncPeriod(period) { - h.duties.Reset(period) + h.duties.Reset(period - 1) } case reorgEvent := <-h.reorg: @@ -146,12 +121,10 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { buildStr := fmt.Sprintf("p%v-e%v-s%v-#%v", period, epoch, slot, slot%32+1) h.logger.Info("🔁 indices change received", zap.String("period_epoch_slot_seq", buildStr)) - h.indicesChanged = true h.fetchCurrentPeriod = true // reset next period duties if in appropriate slot range if h.shouldFetchNextPeriod(slot) { - h.duties.Reset(period + 1) h.fetchNextPeriod = true } } @@ -181,16 +154,19 @@ func (h *SyncCommitteeHandler) processFetching(ctx context.Context, period uint6 func (h *SyncCommitteeHandler) processExecution(period uint64, slot phase0.Slot) { // range over duties and execute - if duties, ok := h.duties.m[period]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)*2) - for _, d := range duties { - if h.shouldExecute(d, slot) { - toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) - toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) - } + duties := h.duties.CommitteePeriodDuties(period) + if duties == nil { + return + } + + toExecute := make([]*spectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d, slot) { + toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) + toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) } - h.executeDuties(h.logger, toExecute) } + h.executeDuties(h.logger, toExecute) } func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period uint64) error { @@ -202,19 +178,26 @@ func (h
*SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period } lastEpoch := h.network.Beacon.FirstEpochOfSyncPeriod(period+1) - 1 - indices := h.validatorController.ActiveValidatorIndices(firstEpoch) - - if len(indices) == 0 { + allActiveIndices := h.validatorController.AllActiveIndices(firstEpoch) + if len(allActiveIndices) == 0 { return nil } - duties, err := h.beaconNode.SyncCommitteeDuties(ctx, firstEpoch, indices) + inCommitteeIndices := h.validatorController.CommitteeActiveIndices(firstEpoch) + inCommitteeIndicesSet := map[phase0.ValidatorIndex]struct{}{} + for _, idx := range inCommitteeIndices { + inCommitteeIndicesSet[idx] = struct{}{} + } + + duties, err := h.beaconNode.SyncCommitteeDuties(ctx, firstEpoch, allActiveIndices) if err != nil { return fmt.Errorf("failed to fetch sync committee duties: %w", err) } + h.duties.Reset(period) for _, d := range duties { - h.duties.Add(period, d) + _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] + h.duties.Add(period, d.ValidatorIndex, d, inCommitteeDuty) } h.prepareDutiesResultLog(period, duties, start) diff --git a/operator/duties/synccommittee_test.go b/operator/duties/sync_committee_test.go similarity index 94% rename from operator/duties/synccommittee_test.go rename to operator/duties/sync_committee_test.go index 774cc2c2a5..b2ec6d5d8b 100644 --- a/operator/duties/synccommittee_test.go +++ b/operator/duties/sync_committee_test.go @@ -12,6 +12,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" ) @@ -55,23 +56,24 @@ func setupSyncCommitteeDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[uint64, [ return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getDuties := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - period := s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) - duties, _ := dutiesMap.Get(period) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + period := s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) + duties, _ := dutiesMap.Get(period) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getDuties).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getDuties).AnyTimes() s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() @@ -89,7 +91,7 @@ func expectedExecutedSyncCommitteeDuties(handler *SyncCommitteeHandler, duties [ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = 
NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -148,7 +150,7 @@ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { func TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -215,7 +217,7 @@ func TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -269,7 +271,7 @@ func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -327,7 +329,7 @@ func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T // reorg current dependent root changed func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -399,7 +401,7 @@ func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { // reorg current dependent root changed including indices change in the same slot func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -479,7 +481,7 @@ func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { func TestScheduler_SyncCommittee_Early_Block(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index 2ac3a49ea3..cdc2a4b605 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -41,7 +41,7 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { sent := 0 for _, share := range shares { - if !share.HasBeaconMetadata() { + if !share.HasBeaconMetadata() || !share.BeaconMetadata.IsAttesting() { continue } diff --git a/operator/node.go b/operator/node.go index 3dc3589349..8c3c98c959 100644 --- a/operator/node.go +++ b/operator/node.go @@ -15,6 +15,7 @@ import ( "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/fee_recipient" "github.com/bloxapp/ssv/operator/slot_ticker" "github.com/bloxapp/ssv/operator/storage" @@ -40,11 +41,10 @@ type Options struct { DB basedb.Database ValidatorController validator.Controller ValidatorOptions validator.ControllerOptions 
`yaml:"ValidatorOptions"` - - WS api.WebSocketServer - WsAPIPort int - - Metrics nodeMetrics + DutyStore *dutystore.Store + WS api.WebSocketServer + WsAPIPort int + Metrics nodeMetrics } // operatorNode implements Node interface @@ -102,6 +102,7 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { ExecuteDuty: opts.ValidatorController.ExecuteDuty, Ticker: slotTicker, BuilderProposals: opts.ValidatorOptions.BuilderProposals, + DutyStore: opts.DutyStore, }), feeRecipientCtrl: fee_recipient.NewController(&fee_recipient.ControllerOptions{ Ctx: opts.Context, diff --git a/operator/validator/controller.go b/operator/validator/controller.go index eb43107100..3048a654d6 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -5,6 +5,7 @@ import ( "crypto/rsa" "encoding/hex" "encoding/json" + "fmt" "sync" "time" @@ -22,8 +23,10 @@ import ( "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" nodestorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validatorsmap" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" @@ -76,7 +79,10 @@ type ControllerOptions struct { NewDecidedHandler qbftcontroller.NewDecidedHandler DutyRoles []spectypes.BeaconRole StorageMap *storage.QBFTStores - Metrics validatorMetrics + Metrics validator.Metrics + MessageValidator validation.MessageValidator + ValidatorsMap *validatorsmap.ValidatorsMap + VerifySignatures bool // worker flags WorkersCount int `yaml:"MsgWorkersCount" env:"MSG_WORKERS_COUNT" env-default:"256" env-description:"Number of goroutines to use for message workers"` @@ -88,7 +94,8 @@ type ControllerOptions struct { // it takes care of bootstrapping, updating and managing existing validators and their shares type Controller interface { StartValidators() - ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex GetValidator(pubKey string) (*validator.Validator, bool) ExecuteDuty(logger *zap.Logger, duty *spectypes.Duty) UpdateValidatorMetaDataLoop() @@ -120,7 +127,7 @@ type controller struct { context context.Context logger *zap.Logger - metrics validatorMetrics + metrics validator.Metrics sharesStorage registrystorage.Shares operatorsStorage registrystorage.Operators @@ -134,8 +141,8 @@ type controller struct { operatorData *registrystorage.OperatorData operatorDataMutex sync.RWMutex - validatorsMap *validatorsMap - validatorOptions *validator.Options + validatorsMap *validatorsmap.ValidatorsMap + validatorOptions validator.Options metadataUpdateInterval time.Duration @@ -144,6 +151,7 @@ type controller struct { messageRouter *messageRouter messageWorker *worker.Worker historySyncBatchSize int + messageValidator validation.MessageValidator // nonCommittees is a cache of initialized nonCommitteeValidator instances nonCommitteeValidators *ttlcache.Cache[spectypes.MessageID, *nonCommitteeValidator] @@ -156,7 +164,7 @@ type controller struct { // NewController creates a new validator controller instance func NewController(logger *zap.Logger, options ControllerOptions) Controller { - logger.Debug("setting validator controller") + logger.Debug("setting up validator controller", 
zap.Bool("message_validation_verify_signatures", options.VerifySignatures)) // lookup in a map that holds all relevant operators operatorsIDs := &sync.Map{} @@ -167,7 +175,7 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { Buffer: options.QueueBufferSize, } - validatorOptions := &validator.Options{ //TODO add vars + validatorOptions := validator.Options{ //TODO add vars Network: options.Network, Beacon: options.Beacon, BeaconNetwork: options.BeaconNetwork.GetNetwork(), @@ -181,6 +189,9 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { Exporter: options.Exporter, BuilderProposals: options.BuilderProposals, GasLimit: options.GasLimit, + MessageValidator: options.MessageValidator, + Metrics: options.Metrics, + VerifySignatures: options.VerifySignatures, } // If full node, increase queue size to make enough room @@ -192,13 +203,14 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { } } - if options.Metrics == nil { - options.Metrics = nopMetrics{} + metrics := validator.Metrics(validator.NopMetrics{}) + if options.Metrics != nil { + metrics = options.Metrics } ctrl := controller{ logger: logger.Named(logging.NameController), - metrics: options.Metrics, + metrics: metrics, sharesStorage: options.RegistryStorage.Shares(), operatorsStorage: options.RegistryStorage, recipientsStorage: options.RegistryStorage, @@ -210,14 +222,14 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { keyManager: options.KeyManager, network: options.Network, - validatorsMap: newValidatorsMap(options.Context, validatorOptions), + validatorsMap: options.ValidatorsMap, validatorOptions: validatorOptions, metadataUpdateInterval: options.MetadataUpdateInterval, operatorsIDs: operatorsIDs, - messageRouter: newMessageRouter(), + messageRouter: newMessageRouter(logger), messageWorker: worker.NewWorker(logger, workerCfg), historySyncBatchSize: options.HistorySyncBatchSize, @@ -226,6 +238,8 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { ), metadataLastUpdated: make(map[string]time.Time), indicesChange: make(chan struct{}), + + messageValidator: options.MessageValidator, } // Start automatic expired item deletion in nonCommitteeValidators. 
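The dutystore package introduced above replaces the handlers' private maps with a shared, RWMutex-guarded index keyed by epoch, slot and validator index. A minimal usage sketch of that API (illustrative only, not part of the patch; it uses only the functions defined in operator/duties/dutystore):

package main

import (
	"fmt"

	eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1"
	"github.com/attestantio/go-eth2-client/spec/phase0"

	"github.com/bloxapp/ssv/operator/duties/dutystore"
)

func main() {
	store := dutystore.New()

	// Add indexes a duty under (epoch, slot, validator index); inCommittee=true
	// marks a duty this operator actually participates in.
	duty := &eth2apiv1.AttesterDuty{
		PubKey:         phase0.BLSPubKey{1, 2, 3},
		Slot:           phase0.Slot(32),
		ValidatorIndex: phase0.ValidatorIndex(1),
	}
	store.Attester.Add(phase0.Epoch(1), duty.Slot, duty.ValidatorIndex, duty, true)

	// CommitteeSlotDuties returns only in-committee duties, which is what the
	// handlers execute; out-of-committee duties are stored but filtered out here.
	for _, d := range store.Attester.CommitteeSlotDuties(phase0.Epoch(1), phase0.Slot(32)) {
		fmt.Println("execute attester duty for validator", d.ValidatorIndex)
	}

	// ValidatorDuty looks up a single validator's duty regardless of committee membership.
	if d := store.Attester.ValidatorDuty(phase0.Epoch(1), phase0.Slot(32), phase0.ValidatorIndex(1)); d != nil {
		fmt.Println("found duty at slot", d.Slot)
	}

	// ResetEpoch drops an epoch wholesale, e.g. before re-fetching after a reorg.
	store.Attester.ResetEpoch(phase0.Epoch(1))
}

The same descriptor pattern backs SyncCommitteeDuties, with the sync committee period (a uint64) taking the place of the (epoch, slot) pair.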
@@ -315,12 +329,12 @@ func (c *controller) handleRouterMessages() { pk := msg.GetID().GetPubKey() hexPK := hex.EncodeToString(pk) if v, ok := c.validatorsMap.GetValidator(hexPK); ok { - v.HandleMessage(c.logger, &msg) + v.HandleMessage(c.logger, msg) } else { if msg.MsgType != spectypes.SSVConsensusMsgType { continue // not supporting other types } - if !c.messageWorker.TryEnqueue(&msg) { // start to save non committee decided messages only post fork + if !c.messageWorker.TryEnqueue(msg) { // start to save non committee decided messages only post fork c.logger.Warn("Failed to enqueue post consensus message: buffer is full") } } @@ -336,7 +350,7 @@ var nonCommitteeValidatorTTLs = map[spectypes.BeaconRole]phase0.Slot{ spectypes.BNRoleSyncCommitteeContribution: 4, } -func (c *controller) handleWorkerMessages(msg *spectypes.SSVMessage) error { +func (c *controller) handleWorkerMessages(msg *queue.DecodedSSVMessage) error { // Get or create a nonCommitteeValidator for this MessageID, and lock it to prevent // other handlers from processing var ncv *nonCommitteeValidator @@ -354,7 +368,7 @@ func (c *controller) handleWorkerMessages(msg *spectypes.SSVMessage) error { return errors.Errorf("could not find validator [%s]", hex.EncodeToString(msg.GetID().GetPubKey())) } - opts := *c.validatorOptions + opts := c.validatorOptions opts.SSVShare = share ncv = &nonCommitteeValidator{ NonCommitteeValidator: validator.NewNonCommitteeValidator(c.logger, msg.GetID(), opts), @@ -460,7 +474,7 @@ func (c *controller) setupNonCommitteeValidators() { for _, validatorShare := range nonCommitteeShares { pubKeys = append(pubKeys, validatorShare.ValidatorPubKey) - opts := *c.validatorOptions + opts := c.validatorOptions opts.SSVShare = validatorShare allRoles := []spectypes.BeaconRole{ spectypes.BNRoleAttester, @@ -548,7 +562,7 @@ func (c *controller) UpdateValidatorMetadata(pk string, metadata *beaconprotocol return nil } -// GetValidator returns a validator instance from validatorsMap +// GetValidator returns a validator instance from ValidatorsMap func (c *controller) GetValidator(pubKey string) (*validator.Validator, bool) { return c.validatorsMap.GetValidator(pubKey) } @@ -565,7 +579,7 @@ func (c *controller) ExecuteDuty(logger *zap.Logger, duty *spectypes.Duty) { logger.Error("could not create duty execute msg", zap.Error(err)) return } - dec, err := queue.DecodeSSVMessage(logger, ssvMsg) + dec, err := queue.DecodeSSVMessage(ssvMsg) if err != nil { logger.Error("could not decode duty execute msg", zap.Error(err)) return @@ -601,25 +615,36 @@ func CreateDutyExecuteMsg(duty *spectypes.Duty, pubKey phase0.BLSPubKey, domain }, nil } -// ActiveValidatorIndices fetches indices of validators who are either attesting or queued and +// CommitteeActiveIndices fetches indices of in-committee validators who are either attesting or queued and // whose activation epoch is not greater than the passed epoch. It logs a warning if an error occurs. -func (c *controller) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { - indices := make([]phase0.ValidatorIndex, 0, len(c.validatorsMap.validatorsMap)) - err := c.validatorsMap.ForEach(func(v *validator.Validator) error { - // Beacon node throws error when trying to fetch duties for non-existing validators. 
- if (v.Share.BeaconMetadata.IsAttesting() || v.Share.BeaconMetadata.Status == v1.ValidatorStatePendingQueued) && - v.Share.BeaconMetadata.ActivationEpoch <= epoch { +func (c *controller) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + validators := c.validatorsMap.GetAll() + indices := make([]phase0.ValidatorIndex, 0, len(validators)) + for _, v := range validators { + if isShareActive(epoch)(v.Share) { indices = append(indices, v.Share.BeaconMetadata.Index) } - return nil - }) - if err != nil { - c.logger.Warn("failed to get all validators public keys", zap.Error(err)) } + return indices +} +func (c *controller) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + shares := c.sharesStorage.List(nil, isShareActive(epoch)) + indices := make([]phase0.ValidatorIndex, len(shares)) + for i, share := range shares { + indices[i] = share.BeaconMetadata.Index + } return indices } +func isShareActive(epoch phase0.Epoch) func(share *ssvtypes.SSVShare) bool { + return func(share *ssvtypes.SSVShare) bool { + return share != nil && share.BeaconMetadata != nil && + (share.BeaconMetadata.IsAttesting() || share.BeaconMetadata.Status == v1.ValidatorStatePendingQueued) && + share.BeaconMetadata.ActivationEpoch <= epoch + } +} + // onMetadataUpdated is called when validator's metadata was updated func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.ValidatorMetadata) { if meta == nil { @@ -647,7 +672,7 @@ func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.Validator // onShareStop is called when a validator was removed or liquidated func (c *controller) onShareStop(pubKey spectypes.ValidatorPK) { - // remove from validatorsMap + // remove from ValidatorsMap v := c.validatorsMap.RemoveValidator(hex.EncodeToString(pubKey)) // stop instance @@ -663,23 +688,56 @@ func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { } if err := c.setShareFeeRecipient(share, c.recipientsStorage.GetRecipientData); err != nil { - return false, errors.Wrap(err, "could not set share fee recipient") + return false, fmt.Errorf("could not set share fee recipient: %w", err) } // Start a committee validator. - v, err := c.validatorsMap.GetOrCreateValidator(c.logger.Named("validatorsMap"), share) - if err != nil { - return false, errors.Wrap(err, "could not get or create validator") + v, found := c.validatorsMap.GetValidator(hex.EncodeToString(share.ValidatorPubKey)) + if !found { + if !share.HasBeaconMetadata() { + return false, fmt.Errorf("beacon metadata is missing") + } + + // Share context with both the validator and the runners, + // so that when the validator is stopped, the runners are stopped as well. 
+ ctx, cancel := context.WithCancel(c.context) + + opts := c.validatorOptions + opts.SSVShare = share + opts.DutyRunners = SetupRunners(ctx, c.logger, opts) + + v = validator.NewValidator(ctx, cancel, opts) + c.validatorsMap.CreateValidator(hex.EncodeToString(share.ValidatorPubKey), v) + + c.printShare(share, "setup validator done") + + } else { + c.printShare(v.Share, "get validator") } + return c.startValidator(v) } +func (c *controller) printShare(s *ssvtypes.SSVShare, msg string) { + committee := make([]string, len(s.Committee)) + for i, c := range s.Committee { + committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.OperatorID, c.PubKey) + } + c.logger.Debug(msg, + fields.PubKey(s.ValidatorPubKey), + zap.Uint64("node_id", s.OperatorID), + zap.Strings("committee", committee), + fields.FeeRecipient(s.FeeRecipientAddress[:]), + ) +} + func (c *controller) setShareFeeRecipient(share *ssvtypes.SSVShare, getRecipientData GetRecipientDataFunc) error { - var feeRecipient bellatrix.ExecutionAddress data, found, err := getRecipientData(nil, share.OwnerAddress) if err != nil { return errors.Wrap(err, "could not get recipient data") } + + var feeRecipient bellatrix.ExecutionAddress if !found { c.logger.Debug("setting fee recipient to owner address", fields.Validator(share.ValidatorPubKey), fields.FeeRecipient(share.OwnerAddress.Bytes())) @@ -718,11 +776,6 @@ func (c *controller) UpdateValidatorMetaDataLoop() { // Prepare share filters. filters := []registrystorage.SharesFilter{} - // Filter for validators who belong to our operator. - if !c.validatorOptions.Exporter { - filters = append(filters, registrystorage.ByOperatorID(c.GetOperatorData().ID)) - } - // Filter for validators who are not liquidated. filters = append(filters, registrystorage.ByNotLiquidated()) @@ -798,9 +851,10 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt //logger.Debug("leader", zap.Int("operator_id", int(leader))) return leader }, - Storage: options.Storage.Get(role), - Network: options.Network, - Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), + Storage: options.Storage.Get(role), + Network: options.Network, + Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), + SignatureVerification: options.VerifySignatures, } config.ValueCheckF = valueCheckF diff --git a/operator/validator/controller_test.go b/operator/validator/controller_test.go index 6a06733db2..2135d24ff3 100644 --- a/operator/validator/controller_test.go +++ b/operator/validator/controller_test.go @@ -7,17 +7,18 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec/phase0" - - "github.com/bloxapp/ssv/logging" - specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/queue/worker" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -32,37 +33,45 @@ func TestHandleNonCommitteeMessages(t *testing.T) { var wg sync.WaitGroup - ctr.messageWorker.UseHandler(func(msg *spectypes.SSVMessage) error { + ctr.messageWorker.UseHandler(func(msg *queue.DecodedSSVMessage) error { wg.Done() return nil }) wg.Add(2) - identifier := 
spectypes.NewMsgID(types.GetDefaultDomain(), []byte("pk"), spectypes.BNRoleAttester)
+	identifier := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte("pk"), spectypes.BNRoleAttester)
 
-	ctr.messageRouter.Route(logger, spectypes.SSVMessage{
-		MsgType: spectypes.SSVConsensusMsgType,
-		MsgID:   identifier,
-		Data:    generateDecidedMessage(t, identifier),
+	ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{
+		SSVMessage: &spectypes.SSVMessage{
+			MsgType: spectypes.SSVConsensusMsgType,
+			MsgID:   identifier,
+			Data:    generateDecidedMessage(t, identifier),
+		},
 	})
 
-	ctr.messageRouter.Route(logger, spectypes.SSVMessage{
-		MsgType: spectypes.SSVConsensusMsgType,
-		MsgID:   identifier,
-		Data:    generateChangeRoundMsg(t, identifier),
+	ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{
+		SSVMessage: &spectypes.SSVMessage{
+			MsgType: spectypes.SSVConsensusMsgType,
+			MsgID:   identifier,
+			Data:    generateChangeRoundMsg(t, identifier),
+		},
 	})
 
-	ctr.messageRouter.Route(logger, spectypes.SSVMessage{ // checks that not process unnecessary message
-		MsgType: message.SSVSyncMsgType,
-		MsgID:   identifier,
-		Data:    []byte("data"),
+	ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{
+		SSVMessage: &spectypes.SSVMessage{ // checks that unnecessary messages are not processed
+			MsgType: message.SSVSyncMsgType,
+			MsgID:   identifier,
+			Data:    []byte("data"),
+		},
 	})
 
-	ctr.messageRouter.Route(logger, spectypes.SSVMessage{ // checks that not process unnecessary message
-		MsgType: spectypes.SSVPartialSignatureMsgType,
-		MsgID:   identifier,
-		Data:    []byte("data"),
+	ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{
+		SSVMessage: &spectypes.SSVMessage{ // checks that unnecessary messages are not processed
+			MsgType: spectypes.SSVPartialSignatureMsgType,
+			MsgID:   identifier,
+			Data:    []byte("data"),
+		},
 	})
 
 	go func() {
@@ -144,27 +153,25 @@ func TestGetIndices(t *testing.T) {
 	logger := logging.TestLogger(t)
 	ctr := setupController(logger, validators)
-	activeIndicesForCurrentEpoch := ctr.ActiveValidatorIndices(currentEpoch)
+	activeIndicesForCurrentEpoch := ctr.CommitteeActiveIndices(currentEpoch)
 	require.Equal(t, 2, len(activeIndicesForCurrentEpoch)) // should return only active indices
 
-	activeIndicesForNextEpoch := ctr.ActiveValidatorIndices(currentEpoch + 1)
+	activeIndicesForNextEpoch := ctr.CommitteeActiveIndices(currentEpoch + 1)
 	require.Equal(t, 3, len(activeIndicesForNextEpoch)) // should return including ValidatorStatePendingQueued
 }
 
 func setupController(logger *zap.Logger, validators map[string]*validator.Validator) controller {
+	validatorsMap := validatorsmap.New(context.TODO(), validatorsmap.WithInitialState(validators))
+
 	return controller{
 		context:                    context.Background(),
 		sharesStorage:              nil,
 		beacon:                     nil,
 		keyManager:                 nil,
 		shareEncryptionKeyProvider: nil,
-		validatorsMap: &validatorsMap{
-			ctx:           context.Background(),
-			lock:          sync.RWMutex{},
-			validatorsMap: validators,
-		},
-		metadataUpdateInterval: 0,
-		messageRouter:          newMessageRouter(),
+		validatorsMap:          validatorsMap,
+		metadataUpdateInterval: 0,
+		messageRouter:          newMessageRouter(logger),
 		messageWorker: worker.NewWorker(logger, &worker.Config{
 			Ctx:          context.Background(),
 			WorkersCount: 1,
diff --git a/operator/validator/metrics.go b/operator/validator/metrics.go
index 2ab82cbfc4..d9cb36e817 100644
--- a/operator/validator/metrics.go
+++ b/operator/validator/metrics.go
@@ -33,31 +33,3 @@ func (c *controller) reportValidatorStatus(pk []byte, meta *beacon.ValidatorMeta
 		c.metrics.ValidatorUnknown(pk)
 	}
 }
-
-type
validatorMetrics interface { - ValidatorInactive(publicKey []byte) - ValidatorNoIndex(publicKey []byte) - ValidatorError(publicKey []byte) - ValidatorReady(publicKey []byte) - ValidatorNotActivated(publicKey []byte) - ValidatorExiting(publicKey []byte) - ValidatorSlashed(publicKey []byte) - ValidatorNotFound(publicKey []byte) - ValidatorPending(publicKey []byte) - ValidatorRemoved(publicKey []byte) - ValidatorUnknown(publicKey []byte) -} - -type nopMetrics struct{} - -func (n nopMetrics) ValidatorInactive([]byte) {} -func (n nopMetrics) ValidatorNoIndex([]byte) {} -func (n nopMetrics) ValidatorError([]byte) {} -func (n nopMetrics) ValidatorReady([]byte) {} -func (n nopMetrics) ValidatorNotActivated([]byte) {} -func (n nopMetrics) ValidatorExiting([]byte) {} -func (n nopMetrics) ValidatorSlashed([]byte) {} -func (n nopMetrics) ValidatorNotFound([]byte) {} -func (n nopMetrics) ValidatorPending([]byte) {} -func (n nopMetrics) ValidatorRemoved([]byte) {} -func (n nopMetrics) ValidatorUnknown([]byte) {} diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index 601d72a936..38121bae8e 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -40,18 +40,18 @@ func (m *MockController) EXPECT() *MockControllerMockRecorder { return m.recorder } -// ActiveValidatorIndices mocks base method. -func (m *MockController) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { +// AllActiveIndices mocks base method. +func (m *MockController) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveValidatorIndices", epoch) + ret := m.ctrl.Call(m, "AllActiveIndices", epoch) ret0, _ := ret[0].([]phase0.ValidatorIndex) return ret0 } -// ActiveValidatorIndices indicates an expected call of ActiveValidatorIndices. -func (mr *MockControllerMockRecorder) ActiveValidatorIndices(epoch interface{}) *gomock.Call { +// AllActiveIndices indicates an expected call of AllActiveIndices. +func (mr *MockControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveValidatorIndices", reflect.TypeOf((*MockController)(nil).ActiveValidatorIndices), epoch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockController)(nil).AllActiveIndices), epoch) } // ExecuteDuty mocks base method. @@ -126,6 +126,20 @@ func (mr *MockControllerMockRecorder) GetValidatorStats() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorStats", reflect.TypeOf((*MockController)(nil).GetValidatorStats)) } +// CommitteeActiveIndices mocks base method. +func (m *MockController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. +func (mr *MockControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockController)(nil).CommitteeActiveIndices), epoch) +} + // IndicesChangeChan mocks base method. 
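+// [editor's note] A hedged usage sketch for the regenerated mock above,
+// assuming the standard gomock workflow (NewMockController is the generated
+// constructor in this package):
+//
+//	ctrl := gomock.NewController(t)
+//	defer ctrl.Finish()
+//	m := mocks.NewMockController(ctrl)
+//	m.EXPECT().CommitteeActiveIndices(phase0.Epoch(1)).
+//		Return([]phase0.ValidatorIndex{1, 2})
+//	indices := m.CommitteeActiveIndices(phase0.Epoch(1)) // stubbed: [1 2]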
func (m *MockController) IndicesChangeChan() chan struct{} { m.ctrl.T.Helper() diff --git a/operator/validator/router.go b/operator/validator/router.go index 67ef8860a9..e090cff3bc 100644 --- a/operator/validator/router.go +++ b/operator/validator/router.go @@ -1,34 +1,40 @@ package validator import ( - spectypes "github.com/bloxapp/ssv-spec/types" + "context" + "go.uber.org/zap" "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) const bufSize = 1024 -func newMessageRouter() *messageRouter { +func newMessageRouter(logger *zap.Logger) *messageRouter { return &messageRouter{ - ch: make(chan spectypes.SSVMessage, bufSize), - msgID: commons.MsgID(), + logger: logger, + ch: make(chan *queue.DecodedSSVMessage, bufSize), + msgID: commons.MsgID(), } } type messageRouter struct { - ch chan spectypes.SSVMessage - msgID commons.MsgIDFunc + logger *zap.Logger + ch chan *queue.DecodedSSVMessage + msgID commons.MsgIDFunc } -func (r *messageRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { +func (r *messageRouter) Route(ctx context.Context, message *queue.DecodedSSVMessage) { select { + case <-ctx.Done(): + r.logger.Warn("context canceled, dropping message") case r.ch <- message: default: - logger.Warn("message router buffer is full. dropping message") + r.logger.Warn("message router buffer is full, dropping message") } } -func (r *messageRouter) GetMessageChan() <-chan spectypes.SSVMessage { +func (r *messageRouter) GetMessageChan() <-chan *queue.DecodedSSVMessage { return r.ch } diff --git a/operator/validator/router_test.go b/operator/validator/router_test.go index 787e2b988d..44b3798cac 100644 --- a/operator/validator/router_test.go +++ b/operator/validator/router_test.go @@ -10,7 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func TestRouter(t *testing.T) { @@ -19,7 +20,7 @@ func TestRouter(t *testing.T) { logger := logging.TestLogger(t) - router := newMessageRouter() + router := newMessageRouter(logger) expectedCount := 1000 count := 0 @@ -40,14 +41,17 @@ func TestRouter(t *testing.T) { }() for i := 0; i < expectedCount; i++ { - msg := spectypes.SSVMessage{ - MsgType: spectypes.MsgType(i % 3), - MsgID: spectypes.NewMsgID(types.GetDefaultDomain(), []byte{1, 1, 1, 1, 1}, spectypes.BNRoleAttester), - Data: []byte(fmt.Sprintf("data-%d", i)), + msg := &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.MsgType(i % 3), + MsgID: spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte{1, 1, 1, 1, 1}, spectypes.BNRoleAttester), + Data: []byte(fmt.Sprintf("data-%d", i)), + }, } - router.Route(logger, msg) + + router.Route(context.TODO(), msg) if i%2 == 0 { - go router.Route(logger, msg) + go router.Route(context.TODO(), msg) } } diff --git a/operator/validator/task_executor.go b/operator/validator/task_executor.go index 5fd3a4c27b..f3b967b5b3 100644 --- a/operator/validator/task_executor.go +++ b/operator/validator/task_executor.go @@ -1,7 +1,6 @@ package validator import ( - "fmt" "time" spectypes "github.com/bloxapp/ssv-spec/types" @@ -89,17 +88,14 @@ func (c *controller) UpdateFeeRecipient(owner, recipient common.Address) error { zap.String("owner", owner.String()), zap.String("fee_recipient", recipient.String())) - err := c.validatorsMap.ForEach(func(v *validator.Validator) error { + c.validatorsMap.ForEach(func(v *validator.Validator) 
bool { if v.Share.OwnerAddress == owner { v.Share.FeeRecipientAddress = recipient logger.Debug("updated recipient address") } - return nil + return true }) - if err != nil { - return fmt.Errorf("update validators map: %w", err) - } return nil } diff --git a/operator/validator/validators_map.go b/operator/validator/validators_map.go deleted file mode 100644 index 02d351f39c..0000000000 --- a/operator/validator/validators_map.go +++ /dev/null @@ -1,126 +0,0 @@ -package validator - -// TODO(nkryuchkov): remove old validator interface(s) -import ( - "context" - "encoding/hex" - "fmt" - "sync" - - "github.com/bloxapp/ssv/logging/fields" - - "go.uber.org/zap" - - "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/types" -) - -// validatorIterator is the function used to iterate over existing validators -type validatorIterator func(validator *validator.Validator) error - -// validatorsMap manages a collection of running validators -type validatorsMap struct { - ctx context.Context - - optsTemplate *validator.Options - - lock sync.RWMutex - validatorsMap map[string]*validator.Validator -} - -func newValidatorsMap(ctx context.Context, optsTemplate *validator.Options) *validatorsMap { - vm := validatorsMap{ - ctx: ctx, - lock: sync.RWMutex{}, - validatorsMap: make(map[string]*validator.Validator), - optsTemplate: optsTemplate, - } - - return &vm -} - -// ForEach loops over validators -func (vm *validatorsMap) ForEach(iterator validatorIterator) error { - vm.lock.RLock() - defer vm.lock.RUnlock() - - for _, val := range vm.validatorsMap { - if err := iterator(val); err != nil { - return err - } - } - return nil -} - -// GetValidator returns a validator -func (vm *validatorsMap) GetValidator(pubKey string) (*validator.Validator, bool) { - // main lock - vm.lock.RLock() - defer vm.lock.RUnlock() - - v, ok := vm.validatorsMap[pubKey] - - return v, ok -} - -// GetOrCreateValidator creates a new validator instance if not exist -func (vm *validatorsMap) GetOrCreateValidator(logger *zap.Logger, share *types.SSVShare) (*validator.Validator, error) { - // main lock - vm.lock.Lock() - defer vm.lock.Unlock() - - pubKey := hex.EncodeToString(share.ValidatorPubKey) - if v, ok := vm.validatorsMap[pubKey]; !ok { - if !share.HasBeaconMetadata() { - return nil, fmt.Errorf("beacon metadata is missing") - } - opts := *vm.optsTemplate - opts.SSVShare = share - - // Share context with both the validator and the runners, - // so that when the validator is stopped, the runners are stopped as well. 
-		ctx, cancel := context.WithCancel(vm.ctx)
-		opts.DutyRunners = SetupRunners(ctx, logger, opts)
-		vm.validatorsMap[pubKey] = validator.NewValidator(ctx, cancel, opts)
-
-		printShare(share, logger, "setup validator done")
-		opts.SSVShare = nil
-	} else {
-		printShare(v.Share, logger, "get validator")
-	}
-
-	return vm.validatorsMap[pubKey], nil
-}
-
-// RemoveValidator removes a validator instance from the map
-func (vm *validatorsMap) RemoveValidator(pubKey string) *validator.Validator {
-	if v, found := vm.GetValidator(pubKey); found {
-		vm.lock.Lock()
-		defer vm.lock.Unlock()
-
-		delete(vm.validatorsMap, pubKey)
-		return v
-	}
-	return nil
-}
-
-// Size returns the number of validators in the map
-func (vm *validatorsMap) Size() int {
-	vm.lock.RLock()
-	defer vm.lock.RUnlock()
-
-	return len(vm.validatorsMap)
-}
-
-func printShare(s *types.SSVShare, logger *zap.Logger, msg string) {
-	committee := make([]string, len(s.Committee))
-	for i, c := range s.Committee {
-		committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.OperatorID, c.PubKey)
-	}
-	logger.Debug(msg,
-		fields.PubKey(s.ValidatorPubKey),
-		zap.Uint64("node_id", s.OperatorID),
-		zap.Strings("committee", committee),
-		fields.FeeRecipient(s.FeeRecipientAddress[:]),
-	)
-}
diff --git a/operator/validatorsmap/validators_map.go b/operator/validatorsmap/validators_map.go
new file mode 100644
index 0000000000..badc404b1c
--- /dev/null
+++ b/operator/validatorsmap/validators_map.go
@@ -0,0 +1,110 @@
+package validatorsmap
+
+// TODO(nkryuchkov): remove old validator interface(s)
+import (
+	"context"
+	"sync"
+
+	"github.com/bloxapp/ssv/protocol/v2/ssv/validator"
+)
+
+// validatorIterator is the function used to iterate over existing validators
+type validatorIterator func(validator *validator.Validator) bool
+
+// ValidatorsMap manages a collection of running validators
+type ValidatorsMap struct {
+	ctx           context.Context
+	lock          sync.RWMutex
+	validatorsMap map[string]*validator.Validator
+}
+
+func New(ctx context.Context, opts ...Option) *ValidatorsMap {
+	vm := &ValidatorsMap{
+		ctx:           ctx,
+		lock:          sync.RWMutex{},
+		validatorsMap: make(map[string]*validator.Validator),
+	}
+
+	for _, opt := range opts {
+		opt(vm)
+	}
+
+	return vm
+}
+
+// Option defines a ValidatorsMap configuration option.
+type Option func(*ValidatorsMap)
+
+// WithInitialState sets initial state
+func WithInitialState(state map[string]*validator.Validator) Option {
+	return func(vm *ValidatorsMap) {
+		vm.validatorsMap = state
+	}
+}
+
+// ForEach loops over validators
+func (vm *ValidatorsMap) ForEach(iterator validatorIterator) bool {
+	vm.lock.RLock()
+	defer vm.lock.RUnlock()
+
+	for _, val := range vm.validatorsMap {
+		if !iterator(val) {
+			return false
+		}
+	}
+	return true
+}
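+
+// [editor's note] A minimal construction sketch for the API above, assuming
+// the repository's validator package supplies the map values:
+//
+//	vm := validatorsmap.New(ctx) // empty map
+//	vm = validatorsmap.New(ctx, validatorsmap.WithInitialState(
+//		map[string]*validator.Validator{pubKeyHex: v},
+//	))
+//	n := vm.Size() // 1 after seeding
+
+// GetAll returns all validators.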
+func (vm *ValidatorsMap) GetAll() []*validator.Validator { + vm.lock.RLock() + defer vm.lock.RUnlock() + + var validators []*validator.Validator + for _, val := range vm.validatorsMap { + validators = append(validators, val) + } + + return validators +} + +// GetValidator returns a validator +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) GetValidator(pubKey string) (*validator.Validator, bool) { + vm.lock.RLock() + defer vm.lock.RUnlock() + + v, ok := vm.validatorsMap[pubKey] + + return v, ok +} + +// CreateValidator creates a new validator instance +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) CreateValidator(pubKey string, v *validator.Validator) { + vm.lock.Lock() + defer vm.lock.Unlock() + + vm.validatorsMap[pubKey] = v +} + +// RemoveValidator removes a validator instance from the map +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) RemoveValidator(pubKey string) *validator.Validator { + if v, found := vm.GetValidator(pubKey); found { + vm.lock.Lock() + defer vm.lock.Unlock() + + delete(vm.validatorsMap, pubKey) + return v + } + return nil +} + +// Size returns the number of validators in the map +func (vm *ValidatorsMap) Size() int { + vm.lock.RLock() + defer vm.lock.RUnlock() + + return len(vm.validatorsMap) +} diff --git a/protocol/v2/blockchain/beacon/mocks/network.go b/protocol/v2/blockchain/beacon/mocks/network.go index 0a129035f2..65c124cbf1 100644 --- a/protocol/v2/blockchain/beacon/mocks/network.go +++ b/protocol/v2/blockchain/beacon/mocks/network.go @@ -233,6 +233,20 @@ func (mr *MockBeaconNetworkMockRecorder) GetNetwork() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetwork", reflect.TypeOf((*MockBeaconNetwork)(nil).GetNetwork)) } +// GetSlotEndTime mocks base method. +func (m *MockBeaconNetwork) GetSlotEndTime(slot phase0.Slot) time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSlotEndTime", slot) + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetSlotEndTime indicates an expected call of GetSlotEndTime. +func (mr *MockBeaconNetworkMockRecorder) GetSlotEndTime(slot interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSlotEndTime", reflect.TypeOf((*MockBeaconNetwork)(nil).GetSlotEndTime), slot) +} + // GetSlotStartTime mocks base method. 
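+// [editor's note] GetSlotEndTime (implemented in network.go below as the start
+// of slot+1) guarantees that end minus start equals one slot duration. A hedged
+// arithmetic sketch, assuming 12-second slots:
+//
+//	start := n.GetSlotStartTime(5) // genesis + 5*12s
+//	end := n.GetSlotEndTime(5)     // == n.GetSlotStartTime(6)
+//	d := end.Sub(start)            // 12s, i.e. n.SlotDurationSec()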
func (m *MockBeaconNetwork) GetSlotStartTime(slot phase0.Slot) time.Time {
	m.ctrl.T.Helper()
diff --git a/protocol/v2/blockchain/beacon/network.go b/protocol/v2/blockchain/beacon/network.go
index e9f0c240c4..965890540f 100644
--- a/protocol/v2/blockchain/beacon/network.go
+++ b/protocol/v2/blockchain/beacon/network.go
@@ -29,6 +29,7 @@ type BeaconNetwork interface {
 	EpochStartTime(epoch phase0.Epoch) time.Time
 	GetSlotStartTime(slot phase0.Slot) time.Time
+	GetSlotEndTime(slot phase0.Slot) time.Time
 
 	IsFirstSlotOfEpoch(slot phase0.Slot) bool
 	GetEpochFirstSlot(epoch phase0.Epoch) phase0.Slot
@@ -82,6 +83,11 @@ func (n Network) GetSlotStartTime(slot phase0.Slot) time.Time {
 	return start
 }
 
+// GetSlotEndTime returns the end time for the given slot
+func (n Network) GetSlotEndTime(slot phase0.Slot) time.Time {
+	return n.GetSlotStartTime(slot + 1)
+}
+
 // EstimatedCurrentSlot returns the estimation of the current slot
 func (n Network) EstimatedCurrentSlot() phase0.Slot {
 	return n.EstimatedSlotAtTime(time.Now().Unix())
diff --git a/protocol/v2/blockchain/beacon/network_test.go b/protocol/v2/blockchain/beacon/network_test.go
new file mode 100644
index 0000000000..a5646bf36a
--- /dev/null
+++ b/protocol/v2/blockchain/beacon/network_test.go
@@ -0,0 +1,19 @@
+package beacon
+
+import (
+	"testing"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	spectypes "github.com/bloxapp/ssv-spec/types"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNetwork_GetSlotEndTime(t *testing.T) {
+	slot := phase0.Slot(1)
+
+	n := NewNetwork(spectypes.PraterNetwork)
+	slotStart := n.GetSlotStartTime(slot)
+	slotEnd := n.GetSlotEndTime(slot)
+
+	require.Equal(t, n.SlotDurationSec(), slotEnd.Sub(slotStart))
+}
diff --git a/protocol/v2/qbft/config.go b/protocol/v2/qbft/config.go
index 2698d2877e..21aae3df6b 100644
--- a/protocol/v2/qbft/config.go
+++ b/protocol/v2/qbft/config.go
@@ -27,17 +27,20 @@ type IConfig interface {
 	GetStorage() qbftstorage.QBFTStore
 	// GetTimer returns round timer
 	GetTimer() roundtimer.Timer
+	// VerifySignatures returns whether message signature verification is enabled
+	VerifySignatures() bool
 }
 
 type Config struct {
-	Signer      spectypes.SSVSigner
-	SigningPK   []byte
-	Domain      spectypes.DomainType
-	ValueCheckF specqbft.ProposedValueCheckF
-	ProposerF   specqbft.ProposerF
-	Storage     qbftstorage.QBFTStore
-	Network     specqbft.Network
-	Timer       roundtimer.Timer
+	Signer                spectypes.SSVSigner
+	SigningPK             []byte
+	Domain                spectypes.DomainType
+	ValueCheckF           specqbft.ProposedValueCheckF
+	ProposerF             specqbft.ProposerF
+	Storage               qbftstorage.QBFTStore
+	Network               specqbft.Network
+	Timer                 roundtimer.Timer
+	SignatureVerification bool
 }
 
 // GetSigner returns a Signer instance
@@ -79,3 +82,7 @@ func (c *Config) GetStorage() qbftstorage.QBFTStore {
 func (c *Config) GetTimer() roundtimer.Timer {
 	return c.Timer
 }
+
+func (c *Config) VerifySignatures() bool {
+	return c.SignatureVerification
+}
diff --git a/protocol/v2/qbft/controller/future_msg.go b/protocol/v2/qbft/controller/future_msg.go
index 30a205ff6e..7c93cd0fe2 100644
--- a/protocol/v2/qbft/controller/future_msg.go
+++ b/protocol/v2/qbft/controller/future_msg.go
@@ -39,9 +39,10 @@ func ValidateFutureMsg(
 		return errors.New("allows 1 signer")
 	}
 
-	// verify signature
-	if err := types.VerifyByOperators(msg.Signature, msg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil {
-		return errors.Wrap(err, "msg signature invalid")
+	if config.VerifySignatures() {
+		if err := types.VerifyByOperators(msg.Signature, msg, config.GetSignatureDomainType(),
spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/commit.go b/protocol/v2/qbft/instance/commit.go index 5620602ea6..53d4f5855e 100644 --- a/protocol/v2/qbft/instance/commit.go +++ b/protocol/v2/qbft/instance/commit.go @@ -158,9 +158,10 @@ func BaseCommitValidation( return errors.Wrap(err, "signed commit invalid") } - // verify signature - if err := types.VerifyByOperators(signedCommit.Signature, signedCommit, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedCommit.Signature, signedCommit, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/prepare.go b/protocol/v2/qbft/instance/prepare.go index 7714771b88..55748b33c2 100644 --- a/protocol/v2/qbft/instance/prepare.go +++ b/protocol/v2/qbft/instance/prepare.go @@ -159,8 +159,10 @@ func validSignedPrepareForHeightRoundAndRoot( return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedPrepare.Signature, signedPrepare, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedPrepare.Signature, signedPrepare, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/proposal.go b/protocol/v2/qbft/instance/proposal.go index 0b112756c1..a4b5303ada 100644 --- a/protocol/v2/qbft/instance/proposal.go +++ b/protocol/v2/qbft/instance/proposal.go @@ -10,7 +10,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/types" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" ) // uponProposal process proposal message @@ -77,8 +77,10 @@ func isValidProposal( if len(signedProposal.GetSigners()) != 1 { return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedProposal.Signature, signedProposal, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := ssvtypes.VerifyByOperators(signedProposal.Signature, signedProposal, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } if !signedProposal.MatchedSigners([]spectypes.OperatorID{proposer(state, config, signedProposal.Message.Round)}) { return errors.New("proposal leader invalid") @@ -121,6 +123,30 @@ func isValidProposal( return errors.New("proposal is not valid with current state") } +func IsProposalJustification( + config qbft.IConfig, + share *ssvtypes.SSVShare, + roundChangeMsgs []*specqbft.SignedMessage, + prepareMsgs []*specqbft.SignedMessage, + height specqbft.Height, + round specqbft.Round, + fullData []byte, +) error { + return isProposalJustification( + &specqbft.State{ + Share: &share.Share, + Height: height, + }, + config, + roundChangeMsgs, + prepareMsgs, + height, + round, + fullData, + func(data []byte) error { return nil }, + ) +} + // 
isProposalJustification returns nil if the proposal and round change messages are valid and justify a proposal message for the provided round, value and leader func isProposalJustification( state *specqbft.State, @@ -256,7 +282,7 @@ func CreateProposal(state *specqbft.State, config qbft.IConfig, fullData []byte, } sig, err := config.GetSigner().SignRoot(msg, spectypes.QBFTSignatureType, state.Share.SharePubKey) if err != nil { - return nil, errors.Wrap(err, "failed signing prepare msg") + return nil, errors.Wrap(err, "failed signing proposal msg") } signedMsg := &specqbft.SignedMessage{ diff --git a/protocol/v2/qbft/instance/round_change.go b/protocol/v2/qbft/instance/round_change.go index 0fb7a54486..00cd676b3d 100644 --- a/protocol/v2/qbft/instance/round_change.go +++ b/protocol/v2/qbft/instance/round_change.go @@ -30,8 +30,11 @@ func (i *Instance) uponRoundChange( return nil // UponCommit was already called } - logger = logger.With(fields.Round(i.State.Round), - fields.Height(i.State.Height)) + logger = logger.With( + fields.Round(i.State.Round), + fields.Height(i.State.Height), + zap.Uint64("msg_round", uint64(signedRoundChange.Message.Round)), + ) logger.Debug("🔄 got round change", fields.Root(signedRoundChange.Message.Root), @@ -249,8 +252,10 @@ func validRoundChangeForData( return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedMsg.Signature, signedMsg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, state.Share.Committee); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedMsg.Signature, signedMsg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, state.Share.Committee); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } if err := signedMsg.Message.Validate(); err != nil { @@ -379,7 +384,7 @@ func CreateRoundChange(state *specqbft.State, config qbft.IConfig, newRound spec } sig, err := config.GetSigner().SignRoot(msg, spectypes.QBFTSignatureType, state.Share.SharePubKey) if err != nil { - return nil, errors.Wrap(err, "failed signing prepare msg") + return nil, errors.Wrap(err, "failed signing round change msg") } signedMsg := &specqbft.SignedMessage{ diff --git a/protocol/v2/qbft/roundtimer/timer.go b/protocol/v2/qbft/roundtimer/timer.go index 4890b1d27f..fde166f3dc 100644 --- a/protocol/v2/qbft/roundtimer/timer.go +++ b/protocol/v2/qbft/roundtimer/timer.go @@ -16,9 +16,9 @@ import ( type OnRoundTimeoutF func(round specqbft.Round) const ( - quickTimeoutThreshold = specqbft.Round(8) - quickTimeout = 2 * time.Second - slowTimeout = 2 * time.Minute + QuickTimeoutThreshold = specqbft.Round(8) + QuickTimeout = 2 * time.Second + SlowTimeout = 2 * time.Minute ) // Timer is an interface for a round timer, calling the UponRoundTimeout when times out @@ -70,9 +70,9 @@ func New(pctx context.Context, beaconNetwork BeaconNetwork, role spectypes.Beaco role: role, beaconNetwork: beaconNetwork, timeoutOptions: TimeoutOptions{ - quickThreshold: quickTimeoutThreshold, - quick: quickTimeout, - slow: slowTimeout, + quickThreshold: QuickTimeoutThreshold, + quick: QuickTimeout, + slow: SlowTimeout, }, } } diff --git a/protocol/v2/qbft/spectest/qbft_mapping_test.go b/protocol/v2/qbft/spectest/qbft_mapping_test.go index d771e98d1f..082d06d54a 100644 --- a/protocol/v2/qbft/spectest/qbft_mapping_test.go +++ b/protocol/v2/qbft/spectest/qbft_mapping_test.go @@ -12,9 +12,10 @@ import ( "github.com/bloxapp/ssv-spec/qbft/spectest/tests/timeout" 
spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/logging" testing2 "github.com/bloxapp/ssv/protocol/v2/qbft/testing" - "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" @@ -31,18 +32,12 @@ func TestQBFTMapping(t *testing.T) { panic(err.Error()) } - origDomain := types.GetDefaultDomain() types.SetDefaultDomain(testingutils.TestingSSVDomainType) - defer func() { - types.SetDefaultDomain(origDomain) - }() for name, test := range untypedTests { name, test := name, test - testName := strings.Split(name, "_")[1] testType := strings.Split(name, "_")[0] - switch testType { case reflect.TypeOf(&spectests.MsgProcessingSpecTest{}).String(): byts, err := json.Marshal(test) @@ -51,6 +46,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunMsgProcessing(t, typedTest) }) case reflect.TypeOf(&spectests.MsgSpecTest{}).String(): @@ -60,6 +56,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunMsg(t, typedTest) }) case reflect.TypeOf(&spectests.ControllerSpecTest{}).String(): @@ -69,6 +66,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunControllerSpecTest(t, typedTest) }) case reflect.TypeOf(&spectests.CreateMsgSpecTest{}).String(): @@ -78,6 +76,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunCreateMsg(t, typedTest) }) case reflect.TypeOf(&spectests.RoundRobinSpecTest{}).String(): @@ -87,12 +86,12 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { // using only spec struct so no need to run our version (TODO: check how we choose leader) + t.Parallel() typedTest.Run(t) }) /*t.Run(typedTest.TestName(), func(t *testing.T) { RunMsg(t, typedTest) })*/ - case reflect.TypeOf(&futuremsg.ControllerSyncSpecTest{}).String(): byts, err := json.Marshal(test) require.NoError(t, err) @@ -100,6 +99,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunControllerSync(t, typedTest) }) case reflect.TypeOf(&timeout.SpecTest{}).String(): diff --git a/protocol/v2/qbft/testing/utils.go b/protocol/v2/qbft/testing/utils.go index f07470c007..c6741925ce 100644 --- a/protocol/v2/qbft/testing/utils.go +++ b/protocol/v2/qbft/testing/utils.go @@ -7,12 +7,12 @@ import ( "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/pkg/errors" + "go.uber.org/zap" + "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" - - "github.com/pkg/errors" - "go.uber.org/zap" ) var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, role types.BeaconRole) *qbft.Config { @@ -34,9 +34,10 @@ var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, ro ProposerF: func(state *specqbft.State, round specqbft.Round) types.OperatorID { return 1 }, - 
Storage: TestingStores(logger).Get(role), - Network: testingutils.NewTestingNetwork(), - Timer: roundtimer.NewTestingTimer(), + Storage: TestingStores(logger).Get(role), + Network: testingutils.NewTestingNetwork(), + Timer: roundtimer.NewTestingTimer(), + SignatureVerification: true, } } diff --git a/protocol/v2/queue/worker/message_worker.go b/protocol/v2/queue/worker/message_worker.go index ee96301870..5c9f2b3f97 100644 --- a/protocol/v2/queue/worker/message_worker.go +++ b/protocol/v2/queue/worker/message_worker.go @@ -2,11 +2,12 @@ package worker import ( "context" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) var ( @@ -24,12 +25,12 @@ func init() { } // MsgHandler func that receive message.SSVMessage to handle -type MsgHandler func(msg *spectypes.SSVMessage) error +type MsgHandler func(msg *queue.DecodedSSVMessage) error // ErrorHandler func that handles an error for a specific message -type ErrorHandler func(msg *spectypes.SSVMessage, err error) error +type ErrorHandler func(msg *queue.DecodedSSVMessage, err error) error -func defaultErrHandler(msg *spectypes.SSVMessage, err error) error { +func defaultErrHandler(msg *queue.DecodedSSVMessage, err error) error { return err } @@ -46,7 +47,7 @@ type Worker struct { ctx context.Context cancel context.CancelFunc workersCount int - queue chan *spectypes.SSVMessage + queue chan *queue.DecodedSSVMessage handler MsgHandler errHandler ErrorHandler metricsPrefix string @@ -60,7 +61,7 @@ func NewWorker(logger *zap.Logger, cfg *Config) *Worker { ctx: ctx, cancel: cancel, workersCount: cfg.WorkersCount, - queue: make(chan *spectypes.SSVMessage, cfg.Buffer), + queue: make(chan *queue.DecodedSSVMessage, cfg.Buffer), errHandler: defaultErrHandler, metricsPrefix: cfg.MetrixPrefix, } @@ -78,7 +79,7 @@ func (w *Worker) init(logger *zap.Logger) { } // startWorker process functionality -func (w *Worker) startWorker(logger *zap.Logger, ch <-chan *spectypes.SSVMessage) { +func (w *Worker) startWorker(logger *zap.Logger, ch <-chan *queue.DecodedSSVMessage) { ctx, cancel := context.WithCancel(w.ctx) defer cancel() for { @@ -104,7 +105,7 @@ func (w *Worker) UseErrorHandler(errHandler ErrorHandler) { // TryEnqueue tries to enqueue a job to the given job channel. Returns true if // the operation was successful, and false if enqueuing would not have been // possible without blocking. Job is not enqueued in the latter case. 
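+// [editor's note] The non-blocking enqueue described above is the classic
+// select-with-default pattern; a standalone sketch assuming only the standard
+// library (the generic helper is illustrative, not part of this patch):
+//
+//	func tryEnqueue[T any](ch chan T, v T) bool {
+//		select {
+//		case ch <- v:
+//			return true // there was room in the buffer
+//		default:
+//			return false // buffer full; the caller decides how to degrade
+//		}
+//	}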
-func (w *Worker) TryEnqueue(msg *spectypes.SSVMessage) bool { +func (w *Worker) TryEnqueue(msg *queue.DecodedSSVMessage) bool { select { case w.queue <- msg: return true @@ -125,7 +126,7 @@ func (w *Worker) Size() int { } // process the msg's from queue -func (w *Worker) process(logger *zap.Logger, msg *spectypes.SSVMessage) { +func (w *Worker) process(logger *zap.Logger, msg *queue.DecodedSSVMessage) { if w.handler == nil { logger.Warn("❗ no handler for worker") return diff --git a/protocol/v2/queue/worker/message_worker_test.go b/protocol/v2/queue/worker/message_worker_test.go index b5cec21317..adbf5032d0 100644 --- a/protocol/v2/queue/worker/message_worker_test.go +++ b/protocol/v2/queue/worker/message_worker_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func TestWorker(t *testing.T) { @@ -20,12 +20,12 @@ func TestWorker(t *testing.T) { Buffer: 2, }) - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) return nil }) for i := 0; i < 5; i++ { - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) time.Sleep(time.Second * 1) } } @@ -41,7 +41,7 @@ func TestManyWorkers(t *testing.T) { }) time.Sleep(time.Millisecond * 100) // wait for worker to start listen - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) wg.Done() return nil @@ -49,7 +49,7 @@ func TestManyWorkers(t *testing.T) { for i := 0; i < 10; i++ { wg.Add(1) - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) } wg.Wait() } @@ -65,7 +65,7 @@ func TestBuffer(t *testing.T) { }) time.Sleep(time.Millisecond * 100) // wait for worker to start listen - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) wg.Done() time.Sleep(time.Millisecond * 100) @@ -74,7 +74,7 @@ func TestBuffer(t *testing.T) { for i := 0; i < 11; i++ { // should buffer 10 msgs wg.Add(1) - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) } wg.Wait() } diff --git a/protocol/v2/ssv/queue/message_prioritizer_test.go b/protocol/v2/ssv/queue/message_prioritizer_test.go index f07e5e2691..deb3654b45 100644 --- a/protocol/v2/ssv/queue/message_prioritizer_test.go +++ b/protocol/v2/ssv/queue/message_prioritizer_test.go @@ -17,7 +17,6 @@ import ( "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) var messagePriorityTests = []struct { @@ -125,7 +124,7 @@ func TestMessagePrioritizer(t *testing.T) { messages := make(messageSlice, len(test.messages)) for i, m := range test.messages { var err error - messages[i], err = DecodeSSVMessage(zap.L(), m.ssvMessage(test.state)) + messages[i], err = DecodeSSVMessage(m.ssvMessage(test.state)) require.NoError(t, err) } diff --git a/protocol/v2/ssv/queue/messages.go b/protocol/v2/ssv/queue/messages.go index 01c6fb945c..f69644eee7 100644 --- a/protocol/v2/ssv/queue/messages.go +++ b/protocol/v2/ssv/queue/messages.go @@ -1,25 +1,31 @@ package queue import ( + "fmt" + specqbft 
"github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/pkg/errors" - "go.uber.org/zap" ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" ) +var ( + ErrUnknownMessageType = fmt.Errorf("unknown message type") +) + // DecodedSSVMessage is a bundle of SSVMessage and it's decoding. +// TODO: try to make it generic type DecodedSSVMessage struct { *spectypes.SSVMessage // Body is the decoded Data. - Body interface{} // *SignedMessage | *SignedPartialSignatureMessage + Body interface{} // *SignedMessage | *SignedPartialSignatureMessage | *EventMsg } // DecodeSSVMessage decodes an SSVMessage and returns a DecodedSSVMessage. -func DecodeSSVMessage(logger *zap.Logger, m *spectypes.SSVMessage) (*DecodedSSVMessage, error) { +func DecodeSSVMessage(m *spectypes.SSVMessage) (*DecodedSSVMessage, error) { var body interface{} switch m.MsgType { case spectypes.SSVConsensusMsgType: // TODO: Or message.SSVDecidedMsgType? @@ -40,6 +46,8 @@ func DecodeSSVMessage(logger *zap.Logger, m *spectypes.SSVMessage) (*DecodedSSVM return nil, errors.Wrap(err, "failed to decode EventMsg") } body = msg + default: + return nil, ErrUnknownMessageType } return &DecodedSSVMessage{ SSVMessage: m, diff --git a/protocol/v2/ssv/queue/metrics.go b/protocol/v2/ssv/queue/metrics.go index 99d3c30ad3..36206704cc 100644 --- a/protocol/v2/ssv/queue/metrics.go +++ b/protocol/v2/ssv/queue/metrics.go @@ -1,14 +1,12 @@ package queue import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" + spectypes "github.com/bloxapp/ssv-spec/types" ) // Metrics records metrics about the Queue. type Metrics interface { - // Dropped increments the number of messages dropped from the Queue. - Dropped() + DroppedQueueMessage(messageID spectypes.MessageID) } type queueWithMetrics struct { @@ -27,35 +25,8 @@ func WithMetrics(q Queue, metrics Metrics) Queue { func (q *queueWithMetrics) TryPush(msg *DecodedSSVMessage) bool { pushed := q.Queue.TryPush(msg) if !pushed { - q.metrics.Dropped() + q.metrics.DroppedQueueMessage(msg.GetID()) } - return pushed -} - -// TODO: move to metrics/prometheus package -type prometheusMetrics struct { - dropped prometheus.Counter -} - -// NewPrometheusMetrics returns a Prometheus implementation of Metrics. -func NewPrometheusMetrics(messageID string) Metrics { - return &prometheusMetrics{ - dropped: metricMessageDropped.WithLabelValues(messageID), - } -} - -func (m *prometheusMetrics) Dropped() { - m.dropped.Inc() -} -// Register Prometheus metrics. 
-var ( - metricMessageDropped = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "ssv:ibft:msgq:drops", - Help: "The amount of message dropped from the validator's msg queue", - }, []string{"msg_id"}) -) - -func init() { - _ = prometheus.Register(metricMessageDropped) + return pushed } diff --git a/protocol/v2/ssv/queue/queue_test.go b/protocol/v2/ssv/queue/queue_test.go index a835779566..4b46c0e045 100644 --- a/protocol/v2/ssv/queue/queue_test.go +++ b/protocol/v2/ssv/queue/queue_test.go @@ -10,8 +10,8 @@ import ( "time" "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" - "go.uber.org/zap" "golang.org/x/text/language" "golang.org/x/text/message" ) @@ -109,7 +109,7 @@ func TestPriorityQueue_Pop(t *testing.T) { queue := New(capacity) require.True(t, queue.Empty()) - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(t, err) // Push messages. @@ -163,7 +163,7 @@ func TestPriorityQueue_Order(t *testing.T) { // Decode messages. messages := make(messageSlice, len(test.messages)) for i, m := range test.messages { - mm, err := DecodeSSVMessage(zap.L(), m.ssvMessage(test.state)) + mm, err := DecodeSSVMessage(m.ssvMessage(test.state)) require.NoError(t, err) messages[i] = mm } @@ -184,30 +184,32 @@ func TestPriorityQueue_Order(t *testing.T) { } } -type mockMetrics struct { - dropped int +type testMetrics struct { + dropped atomic.Uint64 } -func (m *mockMetrics) Dropped() { m.dropped++ } +func (n *testMetrics) DroppedQueueMessage(messageID spectypes.MessageID) { + n.dropped.Add(1) +} func TestWithMetrics(t *testing.T) { - var metrics mockMetrics - queue := WithMetrics(New(1), &metrics) + metrics := &testMetrics{} + queue := WithMetrics(New(1), metrics) require.True(t, queue.Empty()) // Push 1 message. - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(t, err) pushed := queue.TryPush(msg) require.True(t, pushed) require.False(t, queue.Empty()) - require.Equal(t, 0, metrics.dropped) + require.EqualValues(t, 0, metrics.dropped.Load()) // Push above capacity. 
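+	// [editor's note] A hedged recap of the decorator flow this test exercises,
+	// using the testMetrics type defined above:
+	//
+	//	q := WithMetrics(New(1), m) // capacity 1
+	//	q.TryPush(msg)              // true, queue is now full
+	//	q.TryPush(msg)              // false -> m.DroppedQueueMessage(msg.GetID())
+	//	m.dropped.Load()            // 1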
pushed = queue.TryPush(msg) require.False(t, pushed) require.False(t, queue.Empty()) - require.Equal(t, 1, metrics.dropped) + require.EqualValues(t, 1, metrics.dropped.Load()) } func BenchmarkPriorityQueue_Parallel(b *testing.B) { @@ -234,7 +236,7 @@ func benchmarkPriorityQueueParallel(b *testing.B, factory func() Queue, lossy bo messages := make([]*DecodedSSVMessage, messageCount) for i := range messages { var err error - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: qbft.Height(rand.Intn(messageCount)), Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: qbft.Height(rand.Intn(messageCount)), Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(b, err) messages[i] = msg } @@ -359,7 +361,7 @@ func BenchmarkPriorityQueue_Concurrent(b *testing.B) { for _, i := range rand.Perm(messageCount) { height := qbft.FirstHeight + qbft.Height(i) for _, t := range types { - decoded, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: height, Type: t}.ssvMessage(mockState)) + decoded, err := DecodeSSVMessage(mockConsensusMessage{Height: height, Type: t}.ssvMessage(mockState)) require.NoError(b, err) msgs <- decoded } @@ -412,7 +414,7 @@ func BenchmarkPriorityQueue_Concurrent(b *testing.B) { } func decodeAndPush(t require.TestingT, queue Queue, msg mockMessage, state *State) *DecodedSSVMessage { - decoded, err := DecodeSSVMessage(zap.L(), msg.ssvMessage(state)) + decoded, err := DecodeSSVMessage(msg.ssvMessage(state)) require.NoError(t, err) queue.Push(decoded) return decoded diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index b59d404907..ac305585e2 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -54,7 +54,8 @@ type BaseRunner struct { BeaconRoleType spectypes.BeaconRole // implementation vars - TimeoutF TimeoutF `json:"-"` + TimeoutF TimeoutF `json:"-"` + VerifySignatures bool `json:"-"` // highestDecidedSlot holds the highest decided duty slot and gets updated after each decided is reached highestDecidedSlot spec.Slot diff --git a/protocol/v2/ssv/runner/runner_signatures.go b/protocol/v2/ssv/runner/runner_signatures.go index 96b2a723f5..edfc608ea7 100644 --- a/protocol/v2/ssv/runner/runner_signatures.go +++ b/protocol/v2/ssv/runner/runner_signatures.go @@ -3,10 +3,11 @@ package runner import ( spec "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/protocol/v2/types" ssz "github.com/ferranbt/fastssz" "github.com/herumi/bls-eth-go-binary/bls" "github.com/pkg/errors" + + "github.com/bloxapp/ssv/protocol/v2/types" ) func (b *BaseRunner) signBeaconObject( @@ -57,13 +58,15 @@ func (b *BaseRunner) validatePartialSigMsgForSlot( return errors.New("invalid partial sig slot") } - if err := types.VerifyByOperators(signedMsg.GetSignature(), signedMsg, b.Share.DomainType, spectypes.PartialSignatureType, b.Share.Committee); err != nil { - return errors.Wrap(err, "failed to verify PartialSignature") - } + if b.VerifySignatures { + if err := types.VerifyByOperators(signedMsg.GetSignature(), signedMsg, b.Share.DomainType, spectypes.PartialSignatureType, b.Share.Committee); err != nil { + return errors.Wrap(err, "failed to verify PartialSignature") + } - for _, msg := range signedMsg.Message.Messages { - if err := b.verifyBeaconPartialSignature(msg); err != nil { - return errors.Wrap(err, "could not verify Beacon partial Signature") + for _, msg := range 
signedMsg.Message.Messages { + if err := b.verifyBeaconPartialSignature(msg); err != nil { + return errors.Wrap(err, "could not verify Beacon partial Signature") + } } } diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go b/protocol/v2/ssv/spectest/msg_processing_type.go index 19fd0c71c8..e4cbe76036 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -36,6 +36,9 @@ func (test *MsgProcessingSpecTest) TestName() string { func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { logger := logging.TestLogger(t) + + test.Runner.GetBaseRunner().VerifySignatures = true + v := ssvtesting.BaseValidator(logger, spectestingutils.KeySetForShare(test.Runner.GetBaseRunner().Share)) v.DutyRunners[test.Runner.GetBaseRunner().BeaconRoleType] = test.Runner v.Network = test.Runner.GetNetwork().(specqbft.Network) // TODO need to align @@ -45,7 +48,7 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { lastErr = v.StartDuty(logger, test.Duty) } for _, msg := range test.Messages { - dmsg, err := queue.DecodeSSVMessage(logger, msg) + dmsg, err := queue.DecodeSSVMessage(msg) if err != nil { lastErr = err continue @@ -57,7 +60,7 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { } if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) + require.EqualError(t, lastErr, test.ExpectedError, "expected: %v", test.ExpectedError) } else { require.NoError(t, lastErr) } diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go index ccb15f0285..ba90b22767 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -2,7 +2,6 @@ package spectest import ( "encoding/json" - "fmt" "os" "reflect" "strings" @@ -19,7 +18,6 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" @@ -41,105 +39,129 @@ func TestSSVMapping(t *testing.T) { panic(err.Error()) } - origDomain := types.GetDefaultDomain() types.SetDefaultDomain(testingutils.TestingSSVDomainType) - defer func() { - types.SetDefaultDomain(origDomain) - }() for name, test := range untypedTests { name, test := name, test + r := prepareTest(t, logger, name, test) + if r != nil { + t.Run(r.name, func(t *testing.T) { + t.Parallel() + r.test(t) + }) + } + } +} - testName := strings.Split(name, "_")[1] - testType := strings.Split(name, "_")[0] +type runnable struct { + name string + test func(t *testing.T) +} - fmt.Printf("--------- %s - %s \n", testType, testName) +func prepareTest(t *testing.T, logger *zap.Logger, name string, test interface{}) *runnable { + testName := strings.Split(name, "_")[1] + testType := strings.Split(name, "_")[0] - switch testType { - case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &MsgProcessingSpecTest{ - Runner: &runner.AttesterRunner{}, - } - // TODO fix blinded test - if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { - continue - } - require.NoError(t, json.Unmarshal(byts, &typedTest)) + switch testType { + case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &MsgProcessingSpecTest{ + 
Runner: &runner.AttesterRunner{}, + } + // TODO fix blinded test + if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { + return nil + } + require.NoError(t, json.Unmarshal(byts, &typedTest)) - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { RunMsgProcessing(t, typedTest) - }) - case reflect.TypeOf(&tests.MultiMsgProcessingSpecTest{}).String(): - subtests := test.(map[string]interface{})["Tests"].([]interface{}) - typedTests := make([]*MsgProcessingSpecTest, 0) - for _, subtest := range subtests { - typedTests = append(typedTests, msgProcessingSpecTestFromMap(t, subtest.(map[string]interface{}))) - } - - typedTest := &MultiMsgProcessingSpecTest{ - Name: test.(map[string]interface{})["Name"].(string), - Tests: typedTests, - } + }, + } + case reflect.TypeOf(&tests.MultiMsgProcessingSpecTest{}).String(): + typedTest := &MultiMsgProcessingSpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + } + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + for _, subtest := range subtests { + typedTest.Tests = append(typedTest.Tests, msgProcessingSpecTestFromMap(t, subtest.(map[string]interface{}))) + } - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&messages.MsgSpecTest{}).String(): // no use of internal structs so can run as spec test runs - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &messages.MsgSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&messages.MsgSpecTest{}).String(): // no use of internal structs so can run as spec test runs + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &messages.MsgSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&valcheck.SpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &valcheck.SpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&valcheck.SpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &valcheck.SpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&valcheck.MultiSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &valcheck.MultiSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&valcheck.MultiSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &valcheck.MultiSpecTest{} + 
require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { RunSyncCommitteeAggProof(t, typedTest) - }) - case reflect.TypeOf(&newduty.MultiStartNewRunnerDutySpecTest{}).String(): - subtests := test.(map[string]interface{})["Tests"].([]interface{}) - typedTests := make([]*StartNewRunnerDutySpecTest, 0) - for _, subtest := range subtests { - typedTests = append(typedTests, newRunnerDutySpecTestFromMap(t, subtest.(map[string]interface{}))) - } - - typedTest := &MultiStartNewRunnerDutySpecTest{ - Name: test.(map[string]interface{})["Name"].(string), - Tests: typedTests, - } + }, + } + case reflect.TypeOf(&newduty.MultiStartNewRunnerDutySpecTest{}).String(): + typedTest := &MultiStartNewRunnerDutySpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + } - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + for _, subtest := range subtests { + typedTest.Tests = append(typedTest.Tests, newRunnerDutySpecTestFromMap(t, subtest.(map[string]interface{}))) + } typedTest.Run(t, logger) - }) - default: - t.Fatalf("unsupported test type %s [%s]", testType, testName) + }, } + default: + t.Fatalf("unsupported test type %s [%s]", testType, testName) + return nil } } diff --git a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go index 9e12cab157..2fd4091732 100644 --- a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go +++ b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go @@ -24,7 +24,7 @@ func RunSyncCommitteeAggProof(t *testing.T, test *synccommitteeaggregator.SyncCo lastErr := v.StartDuty(logger, &testingutils.TestingSyncCommitteeContributionDuty) for _, msg := range test.Messages { - dmsg, err := queue.DecodeSSVMessage(logger, msg) + dmsg, err := queue.DecodeSSVMessage(msg) if err != nil { lastErr = err continue diff --git a/protocol/v2/ssv/validator/metrics.go b/protocol/v2/ssv/validator/metrics.go new file mode 100644 index 0000000000..ce1840736b --- /dev/null +++ b/protocol/v2/ssv/validator/metrics.go @@ -0,0 +1,45 @@ +package validator + +import ( + "time" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" +) + +type Metrics interface { + ValidatorInactive(publicKey []byte) + ValidatorNoIndex(publicKey []byte) + ValidatorError(publicKey []byte) + ValidatorReady(publicKey []byte) + 
ValidatorNotActivated(publicKey []byte) + ValidatorExiting(publicKey []byte) + ValidatorSlashed(publicKey []byte) + ValidatorNotFound(publicKey []byte) + ValidatorPending(publicKey []byte) + ValidatorRemoved(publicKey []byte) + ValidatorUnknown(publicKey []byte) + + queue.Metrics +} + +type NopMetrics struct{} + +func (n NopMetrics) ValidatorInactive([]byte) {} +func (n NopMetrics) ValidatorNoIndex([]byte) {} +func (n NopMetrics) ValidatorError([]byte) {} +func (n NopMetrics) ValidatorReady([]byte) {} +func (n NopMetrics) ValidatorNotActivated([]byte) {} +func (n NopMetrics) ValidatorExiting([]byte) {} +func (n NopMetrics) ValidatorSlashed([]byte) {} +func (n NopMetrics) ValidatorNotFound([]byte) {} +func (n NopMetrics) ValidatorPending([]byte) {} +func (n NopMetrics) ValidatorRemoved([]byte) {} +func (n NopMetrics) ValidatorUnknown([]byte) {} +func (n NopMetrics) IncomingQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) OutgoingQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) DroppedQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) MessageQueueSize(int) {} +func (n NopMetrics) MessageQueueCapacity(int) {} +func (n NopMetrics) MessageTimeInQueue(spectypes.MessageID, time.Duration) {} diff --git a/protocol/v2/ssv/validator/msgqueue_consumer.go b/protocol/v2/ssv/validator/msgqueue_consumer.go index 7ba5efb119..ba82efa396 100644 --- a/protocol/v2/ssv/validator/msgqueue_consumer.go +++ b/protocol/v2/ssv/validator/msgqueue_consumer.go @@ -28,7 +28,8 @@ type queueContainer struct { // HandleMessage handles a spectypes.SSVMessage. // TODO: accept DecodedSSVMessage once p2p is upgraded to decode messages during validation. -func (v *Validator) HandleMessage(logger *zap.Logger, msg *spectypes.SSVMessage) { +// TODO: get rid of logger, add context +func (v *Validator) HandleMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) { v.mtx.RLock() // read v.Queues defer v.mtx.RUnlock() @@ -37,22 +38,13 @@ func (v *Validator) HandleMessage(logger *zap.Logger, msg *spectypes.SSVMessage) // fields.Role(msg.MsgID.GetRoleType())) if q, ok := v.Queues[msg.MsgID.GetRoleType()]; ok { - decodedMsg, err := queue.DecodeSSVMessage(logger, msg) - if err != nil { - logger.Warn("❗ failed to decode message", - zap.Error(err), - zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), - zap.String("msg_id", msg.MsgID.String()), - ) - return - } - if pushed := q.Q.TryPush(decodedMsg); !pushed { + if pushed := q.Q.TryPush(msg); !pushed { msgID := msg.MsgID.String() logger.Warn("❗ dropping message because the queue is full", zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), zap.String("msg_id", msgID)) } - // logger.Debug("📬 queue: pushed message", fields.MessageID(decodedMsg.MsgID), fields.MessageType(decodedMsg.MsgType)) + // logger.Debug("📬 queue: pushed message", fields.MessageID(msg.MsgID), fields.MessageType(msg.MsgType)) } else { logger.Error("❌ missing queue for role type", fields.Role(msg.MsgID.GetRoleType())) } diff --git a/protocol/v2/ssv/validator/non_committee_validator.go b/protocol/v2/ssv/validator/non_committee_validator.go index 3d03a44d4e..e1bcf47df7 100644 --- a/protocol/v2/ssv/validator/non_committee_validator.go +++ b/protocol/v2/ssv/validator/non_committee_validator.go @@ -9,6 +9,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/qbft" qbftcontroller "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -21,9 +22,10 @@ type 
NonCommitteeValidator struct { func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID, opts Options) *NonCommitteeValidator { // currently, only need domain & storage config := &qbft.Config{ - Domain: types.GetDefaultDomain(), - Storage: opts.Storage.Get(identifier.GetRoleType()), - Network: opts.Network, + Domain: types.GetDefaultDomain(), + Storage: opts.Storage.Get(identifier.GetRoleType()), + Network: opts.Network, + SignatureVerification: opts.VerifySignatures, } ctrl := qbftcontroller.NewController(identifier[:], &opts.SSVShare.Share, types.GetDefaultDomain(), config, opts.FullNode) ctrl.StoredInstances = make(qbftcontroller.InstanceContainer, 0, nonCommitteeInstanceContainerCapacity(opts.FullNode)) @@ -39,7 +41,7 @@ func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID } } -func (ncv *NonCommitteeValidator) ProcessMessage(logger *zap.Logger, msg *spectypes.SSVMessage) { +func (ncv *NonCommitteeValidator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) { logger = logger.With(fields.PubKey(msg.MsgID.GetPubKey()), fields.Role(msg.MsgID.GetRoleType())) if err := validateMessage(ncv.Share.Share, msg); err != nil { diff --git a/protocol/v2/ssv/validator/opts.go b/protocol/v2/ssv/validator/opts.go index b219e58c6a..9c2e0d81a7 100644 --- a/protocol/v2/ssv/validator/opts.go +++ b/protocol/v2/ssv/validator/opts.go @@ -6,6 +6,7 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv/ibft/storage" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" qbftctrl "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" @@ -31,6 +32,9 @@ type Options struct { BuilderProposals bool QueueSize int GasLimit uint64 + MessageValidator validation.MessageValidator + Metrics Metrics + VerifySignatures bool } func (o *Options) defaults() { diff --git a/protocol/v2/ssv/validator/timer.go b/protocol/v2/ssv/validator/timer.go index 6fea69d2fb..6b819b992b 100644 --- a/protocol/v2/ssv/validator/timer.go +++ b/protocol/v2/ssv/validator/timer.go @@ -36,7 +36,7 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID logger.Debug("❗ failed to create timer msg", zap.Error(err)) return } - dec, err := queue.DecodeSSVMessage(logger, msg) + dec, err := queue.DecodeSSVMessage(msg) if err != nil { logger.Debug("❌ failed to decode timer msg", zap.Error(err)) return diff --git a/protocol/v2/ssv/validator/validator.go b/protocol/v2/ssv/validator/validator.go index 7f1dd80d2e..0fa54de66a 100644 --- a/protocol/v2/ssv/validator/validator.go +++ b/protocol/v2/ssv/validator/validator.go @@ -13,6 +13,7 @@ import ( "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" @@ -39,24 +40,31 @@ type Validator struct { dutyIDs *hashmap.Map[spectypes.BeaconRole, string] state uint32 + + messageValidator validation.MessageValidator } // NewValidator creates a new instance of Validator. 
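// If Options.Metrics is left nil, a no-op implementation (NopMetrics) is
// installed below, so metric callbacks are always safe to invoke.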
func NewValidator(pctx context.Context, cancel func(), options Options) *Validator { options.defaults() + if options.Metrics == nil { + options.Metrics = &NopMetrics{} + } + v := &Validator{ - mtx: &sync.RWMutex{}, - ctx: pctx, - cancel: cancel, - DutyRunners: options.DutyRunners, - Network: options.Network, - Storage: options.Storage, - Share: options.SSVShare, - Signer: options.Signer, - Queues: make(map[spectypes.BeaconRole]queueContainer), - state: uint32(NotStarted), - dutyIDs: hashmap.New[spectypes.BeaconRole, string](), + mtx: &sync.RWMutex{}, + ctx: pctx, + cancel: cancel, + DutyRunners: options.DutyRunners, + Network: options.Network, + Storage: options.Storage, + Share: options.SSVShare, + Signer: options.Signer, + Queues: make(map[spectypes.BeaconRole]queueContainer), + state: uint32(NotStarted), + dutyIDs: hashmap.New[spectypes.BeaconRole, string](), + messageValidator: options.MessageValidator, } for _, dutyRunner := range options.DutyRunners { @@ -65,10 +73,9 @@ func NewValidator(pctx context.Context, cancel func(), options Options) *Validat // Setup the queue. role := dutyRunner.GetBaseRunner().BeaconRoleType - msgID := spectypes.NewMsgID(types.GetDefaultDomain(), options.SSVShare.ValidatorPubKey, role).String() v.Queues[role] = queueContainer{ - Q: queue.WithMetrics(queue.New(options.QueueSize), queue.NewPrometheusMetrics(msgID)), + Q: queue.WithMetrics(queue.New(options.QueueSize), options.Metrics), queueState: &queue.State{ HasRunningInstance: false, Height: 0, @@ -111,7 +118,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess return fmt.Errorf("could not get duty runner for msg ID %v", messageID) } - if err := validateMessage(v.Share.Share, msg.SSVMessage); err != nil { + if err := validateMessage(v.Share.Share, msg); err != nil { return fmt.Errorf("message invalid for msg ID %v: %w", messageID, err) } @@ -143,7 +150,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess } } -func validateMessage(share spectypes.Share, msg *spectypes.SSVMessage) error { +func validateMessage(share spectypes.Share, msg *queue.DecodedSSVMessage) error { if !share.ValidatorPubKey.MessageIDBelongs(msg.GetID()) { return errors.New("msg ID doesn't match validator ID") } diff --git a/protocol/v2/types/bls.go b/protocol/v2/types/bls.go index 70d2b7cb0e..d4e2b39fb9 100644 --- a/protocol/v2/types/bls.go +++ b/protocol/v2/types/bls.go @@ -9,7 +9,7 @@ var blsPublicKeyCache *lru.Cache[string, bls.PublicKey] func init() { var err error - blsPublicKeyCache, err = lru.New[string, bls.PublicKey](10_000) + blsPublicKeyCache, err = lru.New[string, bls.PublicKey](128_000) if err != nil { panic(err) } diff --git a/protocol/v2/types/crypto.go b/protocol/v2/types/crypto.go index 24863a64cc..3f08b7ee5b 100644 --- a/protocol/v2/types/crypto.go +++ b/protocol/v2/types/crypto.go @@ -15,13 +15,11 @@ import ( // // TODO: rethink this function and consider moving/refactoring it. 
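// VerifyByOperators deserializes the BLS signature, collects the public keys
// of the operators listed as signers on the message, computes the signing
// root for the given domain and signature type, and fast-aggregate-verifies
// the signature against that root.
//
// A minimal usage sketch, assuming a decoded QBFT signed message `sm` and the
// committee's operators (the variable names here are illustrative):
//
//	err := VerifyByOperators(sm.Signature, sm, domain, spectypes.QBFTSignatureType, committee)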
func VerifyByOperators(s spectypes.Signature, data spectypes.MessageSignature, domain spectypes.DomainType, sigType spectypes.SignatureType, operators []*spectypes.Operator) error { - // decode sig sign := &bls.Sign{} if err := sign.Deserialize(s); err != nil { return errors.Wrap(err, "failed to deserialize signature") } - // find operators pks := make([]bls.PublicKey, 0) for _, id := range data.GetSigners() { found := false @@ -41,13 +39,11 @@ func VerifyByOperators(s spectypes.Signature, data spectypes.MessageSignature, d } } - // compute root computedRoot, err := spectypes.ComputeSigningRoot(data, spectypes.ComputeSignatureDomain(domain, sigType)) if err != nil { return errors.Wrap(err, "could not compute signing root") } - // verify if res := sign.FastAggregateVerify(pks, computedRoot[:]); !res { return errors.New("failed to verify signature") } @@ -72,7 +68,6 @@ func VerifyReconstructedSignature(sig *bls.Sign, validatorPubKey []byte, root [3 return errors.Wrap(err, "could not deserialize validator pk") } - // verify reconstructed sig if res := sig.VerifyByte(&pk, root[:]); !res { return errors.New("could not reconstruct a valid signature") } diff --git a/registry/storage/shares.go b/registry/storage/shares.go index 17572f0257..321bcd15c8 100644 --- a/registry/storage/shares.go +++ b/registry/storage/shares.go @@ -206,6 +206,13 @@ func ByActiveValidator() SharesFilter { } } +// ByAttesting filters for attesting validators. +func ByAttesting() SharesFilter { + return func(share *types.SSVShare) bool { + return share.HasBeaconMetadata() && share.BeaconMetadata.IsAttesting() + } +} + // ByClusterID filters by cluster id. func ByClusterID(clusterID []byte) SharesFilter { return func(share *types.SSVShare) bool { diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index 70aa1a50e3..7369391e50 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -9,7 +9,10 @@ ApprovedChanges: ["256a3dc0f1eb7abf","22b66e9a63ba145b","12c1c3a1622fb7cc","1c44 "9482fb9b6a953c48","5778a05e0976a6eb","24e2c7f54d5dd1d","2a8937e50d20faa9","587c629a67ef07ed","9d06d8e0ee4e1113","e624ec802068e711", "943be3ce709a99d3","5b3bb2d2262fe8be","c20c4c7ed8d1711d","b10c6fc7dd9eee7","c121cdaab6c1c698","e12b17f3910be26b","e47bf52e962c90af", "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f", - "ae1b53fc580ce346","c117bd5db3eeabd6","d06552d71b9ca4cd","4cb333a88af66575"] + "ae1b53fc580ce346","c117bd5db3eeabd6","d06552d71b9ca4cd","4cb333a88af66575","2a580187c312c79a","bf8cf93c55c1eadb","6d877e24991465e4", + "b1c8e0148a4a755","2c25abb7c776bd54","a1754e08473bd1fa","4dbab14670fa155d","2a3667a499a23b16","930379d323dd95e8","65efe31656e8814f", + "1270cef2e573f846"] + IgnoredIdentifiers: - logger ReducedPackageNames: From f029330e3793640a0d715a535044c86374639157 Mon Sep 17 00:00:00 2001 From: Anton Korpusenko Date: Tue, 10 Oct 2023 18:37:35 +0300 Subject: [PATCH 14/54] e2e tests for eth execution layer package (#1143) * improved ValidatorRemoved tests. Fixed sol contract * improved OperatorRemoved test * improved ClusterLiquidated & ClusterReactivated tests * minor fixes + do / undo tests * tests: refactored event_handler. 
Added exporting of private methods
* e2e test: added happy flow for sync history
* e2e testing framework init
* tests: added SyncHistory / SyncOngoing working tests
* tests: added comment
* tests: simplified, added more generating functions
* tests: added ctrl chan for stopping the sync ongoing tests
* tests: split tests into separate files
* tests: added unified interface
* tests: added val removed event test
* tests: added common refactoring
* tests: fixed local test env setup for e2e
* tests: fixed linter errors
* tests: fixed mocked validator ctrl usage, fixed tests, added cluster liquidation event test
* tests: added cluster reactivated event handling test
* tests: added missing happy flow tests
* tests: removed require usage in test env setup functions
* tests: fixed %v => %w for errors
* tests: added httpSrv close on error in setupEnv
* tests: refactored error handling in testEnv setup
* tests: replaced multiple error handling paths with one deferred testEnv shutdown call
* tests: added followDistance to e2e testEnv, with minor fixes according to the review
* tests: reworded unclear lines, refactored the default follow distance func
* tests: simplified testEnv setup
* tests: code style fixes, renaming for better readability, minor text fixes
* tests: code style fixes, removed extra variables
* tests: formatted imports
* tests: grammar fixes requested in PR review
---
 eth/ethtest/cluster_liquidated_test.go  |  91 +++++++
 eth/ethtest/cluster_reactivated_test.go |  87 +++++++
 eth/ethtest/common_test.go              | 231 ++++++++++++++++++
 eth/ethtest/eth_e2e_test.go             | 309 ++++++++++++++++++++++++
 eth/ethtest/operator_added_test.go      |  86 +++++++
 eth/ethtest/operator_removed_test.go    |  83 +++++++
 eth/ethtest/set_fee_recipient_test.go   |  80 ++++++
 eth/ethtest/utils_test.go               | 300 +++++++++++++++++++++++
 eth/ethtest/validator_added_test.go     | 134 ++++++++++
 eth/ethtest/validator_removed_test.go   | 104 ++++++++
 eth/eventhandler/event_handler_test.go  | 109 ++++-----
 11 files changed, 1558 insertions(+), 56 deletions(-)
 create mode 100644 eth/ethtest/cluster_liquidated_test.go
 create mode 100644 eth/ethtest/cluster_reactivated_test.go
 create mode 100644 eth/ethtest/common_test.go
 create mode 100644 eth/ethtest/eth_e2e_test.go
 create mode 100644 eth/ethtest/operator_added_test.go
 create mode 100644 eth/ethtest/operator_removed_test.go
 create mode 100644 eth/ethtest/set_fee_recipient_test.go
 create mode 100644 eth/ethtest/utils_test.go
 create mode 100644 eth/ethtest/validator_added_test.go
 create mode 100644 eth/ethtest/validator_removed_test.go

diff --git a/eth/ethtest/cluster_liquidated_test.go b/eth/ethtest/cluster_liquidated_test.go
new file mode 100644
index 0000000000..46ae795cef
--- /dev/null
+++ b/eth/ethtest/cluster_liquidated_test.go
@@ -0,0 +1,91 @@
+package ethtest
+
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	ethcommon "github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/require"
+
+	"github.com/bloxapp/ssv/eth/simulator/simcontract"
+)
+
+type testClusterLiquidatedInput struct {
+	*CommonTestInput
+	events []*ClusterLiquidatedEventInput
+}
+
+func NewTestClusterLiquidatedInput(common *CommonTestInput) *testClusterLiquidatedInput {
+	return &testClusterLiquidatedInput{common, nil}
+}
+
+func (input *testClusterLiquidatedInput) validate() error {
+	if input.CommonTestInput == nil {
+		return fmt.Errorf("validation error: CommonTestInput is empty")
+	}
+	if input.events == nil {
+		return fmt.Errorf("validation error: empty events")
+	}
+	for _, e := range input.events {
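+		// Every event input must be valid before any transaction is sent.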
+ if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type ClusterLiquidatedEventInput struct { + auth *bind.TransactOpts + ownerAddress *ethcommon.Address + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *ClusterLiquidatedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.ownerAddress == nil: + return fmt.Errorf("validation error: input.ownerAddress is empty") + case input.cluster == nil: + return fmt.Errorf("validation error: input.cluster is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +func (input *testClusterLiquidatedInput) prepare( + eventsToDo []*ClusterLiquidatedEventInput, +) { + input.events = eventsToDo +} + +func (input *testClusterLiquidatedInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.Liquidate( + event.auth, + *event.ownerAddress, + event.opsIds, + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/cluster_reactivated_test.go b/eth/ethtest/cluster_reactivated_test.go new file mode 100644 index 0000000000..664625f44b --- /dev/null +++ b/eth/ethtest/cluster_reactivated_test.go @@ -0,0 +1,87 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testClusterReactivatedInput struct { + *CommonTestInput + events []*ClusterReactivatedEventInput +} + +func NewTestClusterReactivatedInput(common *CommonTestInput) *testClusterReactivatedInput { + return &testClusterReactivatedInput{common, nil} +} + +func (input *testClusterReactivatedInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type ClusterReactivatedEventInput struct { + auth *bind.TransactOpts + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *ClusterReactivatedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.cluster == nil: + return fmt.Errorf("validation error: input.cluster is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +func (input *testClusterReactivatedInput) prepare( + eventsToDo []*ClusterReactivatedEventInput, +) { + input.events = eventsToDo +} + +func (input *testClusterReactivatedInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.Reactivate( + event.auth, + event.opsIds, + big.NewInt(100_000_000), + *event.cluster, + ) + 
require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/common_test.go b/eth/ethtest/common_test.go new file mode 100644 index 0000000000..44105dee65 --- /dev/null +++ b/eth/ethtest/common_test.go @@ -0,0 +1,231 @@ +package ethtest + +import ( + "context" + "fmt" + "math/big" + "net/http/httptest" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" + "github.com/golang/mock/gomock" + "go.uber.org/zap/zaptest" + + "github.com/bloxapp/ssv/eth/eventsyncer" + "github.com/bloxapp/ssv/eth/executionclient" + "github.com/bloxapp/ssv/eth/simulator" + "github.com/bloxapp/ssv/eth/simulator/simcontract" + "github.com/bloxapp/ssv/monitoring/metricsreporter" + "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator/mocks" +) + +type CommonTestInput struct { + t *testing.T + sim *simulator.SimulatedBackend + boundContract *simcontract.Simcontract + blockNum *uint64 + nodeStorage storage.Storage + doInOneBlock bool +} + +func NewCommonTestInput( + t *testing.T, + sim *simulator.SimulatedBackend, + boundContract *simcontract.Simcontract, + blockNum *uint64, + nodeStorage storage.Storage, + doInOneBlock bool, +) *CommonTestInput { + return &CommonTestInput{ + t: t, + sim: sim, + boundContract: boundContract, + blockNum: blockNum, + nodeStorage: nodeStorage, + doInOneBlock: doInOneBlock, + } +} + +type TestEnv struct { + eventSyncer *eventsyncer.EventSyncer + validators []*testValidatorData + ops []*testOperator + nodeStorage storage.Storage + sim *simulator.SimulatedBackend + boundContract *simcontract.Simcontract + auth *bind.TransactOpts + shares [][]byte + execClient *executionclient.ExecutionClient + rpcServer *rpc.Server + httpSrv *httptest.Server + validatorCtrl *mocks.MockController + mockCtrl *gomock.Controller + followDistance *uint64 +} + +func (e *TestEnv) shutdown() { + if e.mockCtrl != nil { + e.mockCtrl.Finish() + } + + if e.httpSrv != nil { + e.httpSrv.Close() + } + + if e.execClient != nil { + // Always returns nil error + _ = e.execClient.Close() + } +} + +func (e *TestEnv) setup( + t *testing.T, + ctx context.Context, + testAddresses []*ethcommon.Address, + validatorsCount uint64, + operatorsCount uint64, +) error { + if e.followDistance == nil { + e.SetDefaultFollowDistance() + } + logger := zaptest.NewLogger(t) + + // Create operators RSA keys + ops, err := createOperators(operatorsCount, 0) + if err != nil { + return err + } + + validators := make([]*testValidatorData, validatorsCount) + shares := make([][]byte, validatorsCount) + + // Create validators, BLS keys, shares + for i := 0; i < int(validatorsCount); i++ { + validators[i], err = createNewValidator(ops) + if err != nil { + return err + } + + shares[i], err = generateSharesData(validators[i], ops, testAddrAlice, i) + if err != nil { + return err + } + } + + eh, validatorCtrl, mockCtrl, nodeStorage, err := setupEventHandler(t, ctx, logger, ops[0], &testAddrAlice, true) + e.mockCtrl = mockCtrl + e.nodeStorage = nodeStorage + + if err != nil { + return err + } + if validatorCtrl == nil { + return fmt.Errorf("validatorCtrl is empty") + } + + // Adding testAddresses to the genesis block mostly to specify some balances for them + sim := simTestBackend(testAddresses) + + // Create JSON-RPC 
handler + rpcServer, err := sim.Node.RPCHandler() + e.rpcServer = rpcServer + if err != nil { + return fmt.Errorf("can't create RPC server: %w", err) + } + // Expose handler on a test server with ws open + httpSrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) + e.httpSrv = httpSrv + + addr := "ws:" + strings.TrimPrefix(httpSrv.URL, "http:") + + parsed, err := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) + if err != nil { + return fmt.Errorf("can't parse contract ABI: %w", err) + } + + auth, err := bind.NewKeyedTransactorWithChainID(testKeyAlice, big.NewInt(1337)) + if err != nil { + return err + } + + contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim) + if err != nil { + return fmt.Errorf("deploy contract: %w", err) + } + + sim.Commit() + + // Check contract code at the simulated blockchain + contractCode, err := sim.CodeAt(ctx, contractAddr, nil) + if err != nil { + return fmt.Errorf("get contract code: %w", err) + } + if len(contractCode) == 0 { + return fmt.Errorf("contractCode is empty") + } + + // Create a client and connect to the simulator + e.execClient, err = executionclient.New( + ctx, + addr, + contractAddr, + executionclient.WithLogger(logger), + executionclient.WithFollowDistance(*e.followDistance), + ) + if err != nil { + return err + } + + err = e.execClient.Healthy(ctx) + if err != nil { + return err + } + + e.boundContract, err = simcontract.NewSimcontract(contractAddr, sim) + if err != nil { + return err + } + + metricsReporter := metricsreporter.New( + metricsreporter.WithLogger(logger), + ) + + e.eventSyncer = eventsyncer.New( + nodeStorage, + e.execClient, + eh, + eventsyncer.WithLogger(logger), + eventsyncer.WithMetrics(metricsReporter), + ) + + e.validatorCtrl = validatorCtrl + e.sim = sim + e.auth = auth + e.validators = validators + e.ops = ops + e.shares = shares + + return nil +} + +func (e *TestEnv) SetDefaultFollowDistance() { + // 8 is current production offset + value := uint64(8) + e.followDistance = &value +} + +func (e *TestEnv) CloseFollowDistance(blockNum *uint64) { + for i := uint64(0); i < *e.followDistance; i++ { + commitBlock(e.sim, blockNum) + } +} + +func commitBlock(sim *simulator.SimulatedBackend, blockNum *uint64) { + sim.Commit() + *blockNum++ +} diff --git a/eth/ethtest/eth_e2e_test.go b/eth/ethtest/eth_e2e_test.go new file mode 100644 index 0000000000..b38dd8ea3d --- /dev/null +++ b/eth/ethtest/eth_e2e_test.go @@ -0,0 +1,309 @@ +package ethtest + +import ( + "context" + "fmt" + "math/big" + "testing" + "time" + + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + registrystorage "github.com/bloxapp/ssv/registry/storage" +) + +var ( + testKeyAlice, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + testKeyBob, _ = crypto.HexToECDSA("42e14d227125f411d6d3285bb4a2e07c2dba2e210bd2f3f4e2a36633bd61bfe6") + + testAddrAlice = crypto.PubkeyToAddress(testKeyAlice.PublicKey) + testAddrBob = crypto.PubkeyToAddress(testKeyBob.PublicKey) +) + +// E2E tests for ETH package +func TestEthExecLayer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testAddresses := make([]*ethcommon.Address, 2) + testAddresses[0] = &testAddrAlice + 
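+	// Bob's address is also used later as the fee recipient in the
+	// SetFeeRecipientAddress step.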
testAddresses[1] = &testAddrBob + + cluster := &simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + } + + expectedNonce := registrystorage.Nonce(0) + + testEnv := TestEnv{} + testEnv.SetDefaultFollowDistance() + + defer testEnv.shutdown() + err := testEnv.setup(t, ctx, testAddresses, 7, 4) + require.NoError(t, err) + + var ( + auth = testEnv.auth + nodeStorage = testEnv.nodeStorage + sim = testEnv.sim + boundContract = testEnv.boundContract + ops = testEnv.ops + validators = testEnv.validators + eventSyncer = testEnv.eventSyncer + shares = testEnv.shares + validatorCtrl = testEnv.validatorCtrl + ) + + blockNum := uint64(0x1) + lastHandledBlockNum := uint64(0x1) + + common := NewCommonTestInput(t, sim, boundContract, &blockNum, nodeStorage, true) + // Prepare blocks with events + // Check that the state is empty before the test + // Check SyncHistory doesn't execute any tasks -> doesn't run any of Controller methods + // Check the node storage for existing of operators and a validator + t.Run("SyncHistory happy flow", func(t *testing.T) { + // BLOCK 2. produce OPERATOR ADDED + // Check that there are no registered operators + { + operators, err := nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, 0, len(operators)) + + opAddedInput := NewOperatorAddedEventInput(common) + opAddedInput.prepare(ops, auth) + opAddedInput.produce() + + testEnv.CloseFollowDistance(&blockNum) + } + + // BLOCK 3: VALIDATOR ADDED: + // Check that there were no operations for Alice Validator + { + nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice) + require.NoError(t, err) + require.Equal(t, expectedNonce, nonce) + + valAddInput := NewTestValidatorRegisteredInput(common) + valAddInput.prepare(validators, shares, ops, auth, &expectedNonce, []uint32{0, 1}) + valAddInput.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Run SyncHistory + lastHandledBlockNum, err = eventSyncer.SyncHistory(ctx, lastHandledBlockNum) + require.NoError(t, err) + + //check all the events were handled correctly and block number was increased + require.Equal(t, blockNum-*testEnv.followDistance, lastHandledBlockNum) + fmt.Println("lastHandledBlockNum", lastHandledBlockNum) + + // Check that operators were successfully registered + operators, err := nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + + // Check that validator was registered + shares := nodeStorage.Shares().List(nil) + require.Equal(t, len(valAddInput.events), len(shares)) + + // Check the nonce was bumped + nonce, err = nodeStorage.GetNextNonce(nil, testAddrAlice) + require.NoError(t, err) + require.Equal(t, expectedNonce, nonce) + } + }) + + // Main difference between "online" events handling and syncing the historical (old) events + // is that here we have to check that the controller was triggered + t.Run("SyncOngoing happy flow", func(t *testing.T) { + go func() { + err = eventSyncer.SyncOngoing(ctx, lastHandledBlockNum+1) + require.NoError(t, err) + }() + + stopChan := make(chan struct{}) + go func() { + for { + select { + case <-ctx.Done(): + return + case <-stopChan: + return + default: + time.Sleep(100 * time.Millisecond) + } + } + }() + + // Step 1: Add more validators + { + validatorCtrl.EXPECT().StartValidator(gomock.Any()).AnyTimes() + + // Check current nonce before start + nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice) + require.NoError(t, err) + require.Equal(t, 
expectedNonce, nonce) + + valAddInput := NewTestValidatorRegisteredInput(common) + valAddInput.prepare(validators, shares, ops, auth, &expectedNonce, []uint32{2, 3, 4, 5, 6}) + valAddInput.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 5000) + + nonce, err = nodeStorage.GetNextNonce(nil, testAddrAlice) + require.NoError(t, err) + require.Equal(t, expectedNonce, nonce) + + // Not sure does this make sense + require.Equal(t, uint64(testEnv.sim.Blockchain.CurrentBlock().Number.Int64()), *common.blockNum) + } + + // Step 2: remove validator + { + validatorCtrl.EXPECT().StopValidator(gomock.Any()).AnyTimes() + + shares := nodeStorage.Shares().List(nil) + require.Equal(t, 7, len(shares)) + + valRemove := NewTestValidatorRemovedEventsInput(common) + valRemove.prepare( + validators, + []uint64{0, 1}, + []uint64{1, 2, 3, 4}, + auth, + cluster, + ) + valRemove.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 500) + + shares = nodeStorage.Shares().List(nil) + require.Equal(t, 5, len(shares)) + + for _, event := range valRemove.events { + valPubKey := event.validator.masterPubKey.Serialize() + valShare := nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + } + } + + // Step 3 Liquidate Cluster + { + validatorCtrl.EXPECT().LiquidateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + clusterLiquidate := NewTestClusterLiquidatedInput(common) + clusterLiquidate.prepare([]*ClusterLiquidatedEventInput{ + { + auth: auth, + ownerAddress: &testAddrAlice, + opsIds: []uint64{1, 2, 3, 4}, + cluster: cluster, + }, + }) + clusterLiquidate.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + clusterID, err := ssvtypes.ComputeClusterIDHash(testAddrAlice.Bytes(), []uint64{1, 2, 3, 4}) + require.NoError(t, err) + + shares := nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.True(t, s.Liquidated) + } + } + + // Step 4 Reactivate Cluster + { + validatorCtrl.EXPECT().ReactivateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + clusterID, err := ssvtypes.ComputeClusterIDHash(testAddrAlice.Bytes(), []uint64{1, 2, 3, 4}) + require.NoError(t, err) + + shares := nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.True(t, s.Liquidated) + } + + // Trigger the event + clusterReactivated := NewTestClusterReactivatedInput(common) + clusterReactivated.prepare([]*ClusterReactivatedEventInput{ + { + auth: auth, + opsIds: []uint64{1, 2, 3, 4}, + cluster: cluster, + }, + }) + clusterReactivated.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + shares = nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.False(t, s.Liquidated) + } + } + + // Step 5 Remove some Operators + { + operators, err := nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, 4, len(operators)) + + opRemoved := NewOperatorRemovedEventInput(common) + opRemoved.prepare([]uint64{1, 2}, auth) + opRemoved.produce() + 
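+			// Advance the chain past the follow distance so the syncer can
+			// observe the blocks that contain these events.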
testEnv.CloseFollowDistance(&blockNum) + + // TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved + } + + // Step 6 Update Fee Recipient + { + validatorCtrl.EXPECT().UpdateFeeRecipient(gomock.Any(), gomock.Any()).Times(1) + + setFeeRecipient := NewSetFeeRecipientAddressInput(common) + setFeeRecipient.prepare([]*SetFeeRecipientAddressEventInput{ + {auth, &testAddrBob}, + }) + setFeeRecipient.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + recipientData, found, err := nodeStorage.GetRecipientData(nil, testAddrAlice) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, testAddrBob.String(), recipientData.FeeRecipient.String()) + } + + stopChan <- struct{}{} + }) +} diff --git a/eth/ethtest/operator_added_test.go b/eth/ethtest/operator_added_test.go new file mode 100644 index 0000000000..9a173a5064 --- /dev/null +++ b/eth/ethtest/operator_added_test.go @@ -0,0 +1,86 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/eventparser" +) + +type testOperatorAddedEventInput struct { + op *testOperator + auth *bind.TransactOpts +} + +type ProduceOperatorAddedEventsInput struct { + *CommonTestInput + events []*testOperatorAddedEventInput +} + +func NewOperatorAddedEventInput(common *CommonTestInput) *ProduceOperatorAddedEventsInput { + return &ProduceOperatorAddedEventsInput{common, nil} +} + +func (input *ProduceOperatorAddedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, event := range input.events { + err := event.validate() + if err != nil { + return err + } + } + return nil +} +func (input *testOperatorAddedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.op == nil: + return fmt.Errorf("validation error: input.op is empty") + } + + return nil +} + +func (input *ProduceOperatorAddedEventsInput) prepare( + ops []*testOperator, + auth *bind.TransactOpts, +) { + input.events = make([]*testOperatorAddedEventInput, len(ops)) + + for i, op := range ops { + input.events[i] = &testOperatorAddedEventInput{op, auth} + } +} + +func (input *ProduceOperatorAddedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + op := event.op + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) + require.NoError(input.t, err) + _, err = input.boundContract.SimcontractTransactor.RegisterOperator(event.auth, packedOperatorPubKey, big.NewInt(100_000_000)) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/operator_removed_test.go b/eth/ethtest/operator_removed_test.go new file mode 100644 index 0000000000..5b4dd27822 --- /dev/null +++ b/eth/ethtest/operator_removed_test.go @@ -0,0 +1,83 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" +) + +type testOperatorRemovedEventInput struct { + opId 
uint64 + auth *bind.TransactOpts +} + +type ProduceOperatorRemovedEventsInput struct { + *CommonTestInput + events []*testOperatorRemovedEventInput +} + +func NewOperatorRemovedEventInput(common *CommonTestInput) *ProduceOperatorRemovedEventsInput { + return &ProduceOperatorRemovedEventsInput{common, nil} +} + +func (input *ProduceOperatorRemovedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, event := range input.events { + err := event.validate() + if err != nil { + return err + } + } + return nil +} +func (input *testOperatorRemovedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.opId == 0: + return fmt.Errorf("validation error: input.opId is invalid") + } + + return nil +} + +func (input *ProduceOperatorRemovedEventsInput) prepare( + opsIds []uint64, + auth *bind.TransactOpts, +) { + input.events = make([]*testOperatorRemovedEventInput, len(opsIds)) + + for i, opId := range opsIds { + input.events[i] = &testOperatorRemovedEventInput{opId, auth} + } +} + +func (input *ProduceOperatorRemovedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + _, err = input.boundContract.SimcontractTransactor.RemoveOperator( + event.auth, + event.opId, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/set_fee_recipient_test.go b/eth/ethtest/set_fee_recipient_test.go new file mode 100644 index 0000000000..14ac7dd263 --- /dev/null +++ b/eth/ethtest/set_fee_recipient_test.go @@ -0,0 +1,80 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +type SetFeeRecipientAddressInput struct { + *CommonTestInput + events []*SetFeeRecipientAddressEventInput +} + +func NewSetFeeRecipientAddressInput(common *CommonTestInput) *SetFeeRecipientAddressInput { + return &SetFeeRecipientAddressInput{common, nil} +} + +func (input *SetFeeRecipientAddressInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type SetFeeRecipientAddressEventInput struct { + auth *bind.TransactOpts + address *ethcommon.Address +} + +func (input *SetFeeRecipientAddressEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.address == nil: + return fmt.Errorf("validation error: input.address is empty") + } + + return nil +} + +func (input *SetFeeRecipientAddressInput) prepare( + eventsToDo []*SetFeeRecipientAddressEventInput, +) { + input.events = eventsToDo +} + +func (input *SetFeeRecipientAddressInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range 
input.events { + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.SetFeeRecipientAddress( + event.auth, + *event.address, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/utils_test.go b/eth/ethtest/utils_test.go new file mode 100644 index 0000000000..289030f7c8 --- /dev/null +++ b/eth/ethtest/utils_test.go @@ -0,0 +1,300 @@ +package ethtest + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "errors" + "fmt" + "math/big" + "testing" + + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/golang/mock/gomock" + "github.com/herumi/bls-eth-go-binary/bls" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/ekm" + "github.com/bloxapp/ssv/eth/contract" + "github.com/bloxapp/ssv/eth/eventhandler" + "github.com/bloxapp/ssv/eth/eventparser" + "github.com/bloxapp/ssv/eth/simulator" + ibftstorage "github.com/bloxapp/ssv/ibft/storage" + "github.com/bloxapp/ssv/networkconfig" + operatorstorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validator/mocks" + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + registrystorage "github.com/bloxapp/ssv/registry/storage" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils/blskeygen" + "github.com/bloxapp/ssv/utils/rsaencryption" + "github.com/bloxapp/ssv/utils/threshold" +) + +type testValidatorData struct { + masterKey *bls.SecretKey + masterPubKey *bls.PublicKey + masterPublicKeys bls.PublicKeys + operatorsShares []*testShare +} + +type testOperator struct { + id uint64 + rsaPub []byte + rsaPriv []byte +} + +type testShare struct { + opId uint64 + sec *bls.SecretKey + pub *bls.PublicKey +} + +func createNewValidator(ops []*testOperator) (*testValidatorData, error) { + validatorData := &testValidatorData{} + sharesCount := uint64(len(ops)) + threshold.Init() + + msk, mpk := blskeygen.GenBLSKeyPair() + secVec := msk.GetMasterSecretKey(int(sharesCount)) + pubKeys := bls.GetMasterPublicKey(secVec) + splitKeys, err := threshold.Create(msk.Serialize(), sharesCount-1, sharesCount) + if err != nil { + return nil, err + } + + validatorData.operatorsShares = make([]*testShare, sharesCount) + + // derive a `sharesCount` number of shares + for i := uint64(1); i <= sharesCount; i++ { + validatorData.operatorsShares[i-1] = &testShare{ + opId: i, + sec: splitKeys[i], + pub: splitKeys[i].GetPublicKey(), + } + } + + validatorData.masterKey = msk + validatorData.masterPubKey = mpk + validatorData.masterPublicKeys = pubKeys + + return validatorData, nil +} + +func createOperators(num uint64, idOffset uint64) ([]*testOperator, error) { + testOps := make([]*testOperator, num) + + for i := uint64(1); i <= num; i++ { + pb, sk, err := rsaencryption.GenerateKeys() + if err != nil { + return nil, err + } + testOps[i-1] = &testOperator{ + id: idOffset + i, + rsaPub: pb, + rsaPriv: sk, + } + } + + return testOps, nil +} + +func generateSharesData(validatorData *testValidatorData, operators []*testOperator, owner ethcommon.Address, nonce int) ([]byte, error) { + var pubKeys []byte + var encryptedShares []byte + + for i, op := range operators { + rsaKey, err := rsaencryption.ConvertPemToPublicKey(op.rsaPub) + if err != nil { + return nil, 
fmt.Errorf("can't convert public key: %w", err) + } + + rawShare := validatorData.operatorsShares[i].sec.SerializeToHexStr() + cipherText, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(rawShare)) + if err != nil { + return nil, fmt.Errorf("can't encrypt share: %w", err) + } + + rsaPriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.rsaPriv)) + if err != nil { + return nil, fmt.Errorf("can't convert secret key to a private key share: %w", err) + } + + // check that we encrypt right + shareSecret := &bls.SecretKey{} + decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsaPriv, cipherText) + if err != nil { + return nil, err + } + if err = shareSecret.SetHexString(string(decryptedSharePrivateKey)); err != nil { + return nil, err + } + + pubKeys = append(pubKeys, validatorData.operatorsShares[i].pub.Serialize()...) + encryptedShares = append(encryptedShares, cipherText...) + + } + + toSign := fmt.Sprintf("%s:%d", owner.String(), nonce) + msgHash := crypto.Keccak256([]byte(toSign)) + signed := validatorData.masterKey.Sign(string(msgHash)) + sig := signed.Serialize() + + if !signed.VerifyByte(validatorData.masterPubKey, msgHash) { + return nil, errors.New("can't sign correctly") + } + + sharesData := append(pubKeys, encryptedShares...) + sharesDataSigned := append(sig, sharesData...) + + return sharesDataSigned, nil +} + +func setupEventHandler( + t *testing.T, + ctx context.Context, + logger *zap.Logger, + operator *testOperator, + ownerAddress *ethcommon.Address, + useMockCtrl bool, +) (*eventhandler.EventHandler, *mocks.MockController, *gomock.Controller, operatorstorage.Storage, error) { + db, err := kv.NewInMemory(logger, basedb.Options{ + Ctx: ctx, + }) + if err != nil { + return nil, nil, nil, nil, err + } + + storageMap := ibftstorage.NewStores() + nodeStorage, operatorData := setupOperatorStorage(logger, db, operator, ownerAddress) + testNetworkConfig := networkconfig.TestNetwork + + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, testNetworkConfig, true, "") + if err != nil { + return nil, nil, nil, nil, err + } + + ctrl := gomock.NewController(t) + bc := beacon.NewMockBeaconNode(ctrl) + + contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) + if err != nil { + return nil, nil, nil, nil, err + } + + if useMockCtrl { + validatorCtrl := mocks.NewMockController(ctrl) + + parser := eventparser.New(contractFilterer) + + eh, err := eventhandler.New( + nodeStorage, + parser, + validatorCtrl, + testNetworkConfig.Domain, + validatorCtrl, + nodeStorage.GetPrivateKey, + keyManager, + bc, + storageMap, + eventhandler.WithFullNode(), + eventhandler.WithLogger(logger), + ) + + if err != nil { + return nil, nil, nil, nil, err + } + + validatorCtrl.EXPECT().GetOperatorData().Return(operatorData).AnyTimes() + + return eh, validatorCtrl, ctrl, nodeStorage, nil + } + + validatorCtrl := validator.NewController(logger, validator.ControllerOptions{ + Context: ctx, + DB: db, + RegistryStorage: nodeStorage, + KeyManager: keyManager, + StorageMap: storageMap, + OperatorData: operatorData, + }) + + parser := eventparser.New(contractFilterer) + + eh, err := eventhandler.New( + nodeStorage, + parser, + validatorCtrl, + testNetworkConfig.Domain, + validatorCtrl, + nodeStorage.GetPrivateKey, + keyManager, + bc, + storageMap, + eventhandler.WithFullNode(), + eventhandler.WithLogger(logger), + ) + if err != nil { + return nil, nil, nil, nil, err + } + + return eh, nil, ctrl, nodeStorage, nil +} + +func setupOperatorStorage( + logger *zap.Logger, + db 
basedb.Database, + operator *testOperator, + ownerAddress *ethcommon.Address, +) (operatorstorage.Storage, *registrystorage.OperatorData) { + if operator == nil { + logger.Fatal("empty test operator was passed") + } + + nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) + if err != nil { + logger.Fatal("failed to create node storage", zap.Error(err)) + } + + operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.rsaPriv)) + if err != nil { + logger.Fatal("couldn't setup operator private key", zap.Error(err)) + } + + _, found, err := nodeStorage.GetPrivateKey() + if err != nil || !found { + logger.Fatal("failed to get operator private key", zap.Error(err)) + } + var operatorData *registrystorage.OperatorData + operatorData, found, err = nodeStorage.GetOperatorDataByPubKey(nil, operatorPubKey) + + if err != nil { + logger.Fatal("couldn't get operator data by public key", zap.Error(err)) + } + if !found { + operatorData = ®istrystorage.OperatorData{ + PublicKey: operatorPubKey, + ID: operator.id, + OwnerAddress: *ownerAddress, + } + } + + return nodeStorage, operatorData +} + +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { + genesis := core.GenesisAlloc{} + + for _, testAddr := range testAddresses { + genesis[*testAddr] = core.GenesisAccount{Balance: big.NewInt(10000000000000000)} + } + + return simulator.NewSimulatedBackend( + genesis, 50_000_000, + ) +} diff --git a/eth/ethtest/validator_added_test.go b/eth/ethtest/validator_added_test.go new file mode 100644 index 0000000000..2497552e7f --- /dev/null +++ b/eth/ethtest/validator_added_test.go @@ -0,0 +1,134 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" + registrystorage "github.com/bloxapp/ssv/registry/storage" +) + +type testValidatorRegisteredInput struct { + *CommonTestInput + events []*validatorRegisteredEventInput +} + +func NewTestValidatorRegisteredInput(common *CommonTestInput) *testValidatorRegisteredInput { + return &testValidatorRegisteredInput{common, nil} +} + +func (input *testValidatorRegisteredInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type validatorRegisteredEventInput struct { + auth *bind.TransactOpts + ops []*testOperator + validator *testValidatorData + share []byte + opsIds []uint64 // separating opsIds from ops as it is a separate event field and should be used for destructive tests +} + +func (input *validatorRegisteredEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.validator == nil: + return fmt.Errorf("validation error: input.validator is empty") + case len(input.share) == 0: + return fmt.Errorf("validation error: input.share is empty") + case len(input.ops) == 0: + return fmt.Errorf("validation error: input.ops is empty") + } + + if len(input.opsIds) == 0 { + input.opsIds = make([]uint64, len(input.ops)) + for i, op := range input.ops { + input.opsIds[i] = op.id + } + } + + return nil +} + +func (input 
*testValidatorRegisteredInput) prepare( + validators []*testValidatorData, + shares [][]byte, + ops []*testOperator, + auth *bind.TransactOpts, + expectedNonce *registrystorage.Nonce, + validatorsIds []uint32, +) { + input.events = make([]*validatorRegisteredEventInput, len(validatorsIds)) + + for i, validatorId := range validatorsIds { + // Check there are no shares in the state for the current validator + valPubKey := validators[validatorId].masterPubKey.Serialize() + share := input.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(input.t, share) + + // Create event input + input.events[i] = &validatorRegisteredEventInput{ + validator: validators[validatorId], + share: shares[validatorId], + auth: auth, + ops: ops, + } + + // expect nonce bumping after each of these ValidatorAdded events handling + *expectedNonce++ + } +} + +func (input *testValidatorRegisteredInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + val := event.validator + valPubKey := val.masterPubKey.Serialize() + shares := input.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(input.t, shares) + + // Call the contract method + _, err := input.boundContract.SimcontractTransactor.RegisterValidator( + event.auth, + val.masterPubKey.Serialize(), + event.opsIds, + event.share, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/validator_removed_test.go b/eth/ethtest/validator_removed_test.go new file mode 100644 index 0000000000..778b67dff8 --- /dev/null +++ b/eth/ethtest/validator_removed_test.go @@ -0,0 +1,104 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testValidatorRemovedInput struct { + auth *bind.TransactOpts + validator *testValidatorData + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *testValidatorRemovedInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.validator == nil: + return fmt.Errorf("validation error: input.validator is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +type TestValidatorRemovedEventsInput struct { + *CommonTestInput + events []*testValidatorRemovedInput +} + +func (input *TestValidatorRemovedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: empty CommonTestInput") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +func NewTestValidatorRemovedEventsInput(common *CommonTestInput) *TestValidatorRemovedEventsInput { + return &TestValidatorRemovedEventsInput{common, nil} +} + +func (input *TestValidatorRemovedEventsInput) prepare( + validators []*testValidatorData, + validatorsIds []uint64, + opsIds []uint64, + auth *bind.TransactOpts, + cluster 
*simcontract.CallableCluster, +) { + input.events = make([]*testValidatorRemovedInput, len(validatorsIds)) + + for i, validatorId := range validatorsIds { + input.events[i] = &testValidatorRemovedInput{ + auth, + validators[validatorId], + opsIds, + cluster, + } + } +} + +func (input *TestValidatorRemovedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + valPubKey := event.validator.masterPubKey.Serialize() + // Check the validator's shares are present in the state before removing + valShare := input.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(input.t, valShare) + + _, err = input.boundContract.SimcontractTransactor.RemoveValidator( + event.auth, + valPubKey, + event.opsIds, + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index fa55d0dab6..070de44d04 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -151,7 +151,7 @@ func TestHandleBlockEventsStream(t *testing.T) { for _, op := range ops { // Call the contract method - packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.pub) + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) require.NoError(t, err) _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000)) require.NoError(t, err) @@ -170,7 +170,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }() // Check that there is no registered operators - operators, err := eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, 0, len(operators)) @@ -181,7 +181,7 @@ func TestHandleBlockEventsStream(t *testing.T) { blockNum++ // Check storage for the new operators - operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, len(ops), len(operators)) @@ -193,7 +193,7 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) require.Equal(t, operatorAddedEvent.OperatorId, data.ID) require.Equal(t, operatorAddedEvent.Owner, data.OwnerAddress) - require.Equal(t, ops[i].pub, data.PublicKey) + require.Equal(t, ops[i].rsaPub, data.PublicKey) } }) @@ -217,7 +217,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }() // Check that there is 1 registered operator - operators, err := eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, len(ops), len(operators)) @@ -228,7 +228,7 @@ func TestHandleBlockEventsStream(t *testing.T) { blockNum++ // Check if the operator wasn't removed successfully - operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, len(ops), len(operators)) }) @@ -242,7 +242,7 @@ func TestHandleBlockEventsStream(t *testing.T) { operatorsCount++ // Call the contract method - packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op[0].pub) + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op[0].rsaPub) require.NoError(t, err) _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, 
packedOperatorPubKey, big.NewInt(100_000_000)) require.NoError(t, err) @@ -260,7 +260,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }() // Check that there is no registered operators - operators, err := eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, len(ops), len(operators)) @@ -270,7 +270,7 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) blockNum++ // Check storage for the new operator - operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, len(ops)+1, len(operators)) @@ -290,7 +290,7 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() - operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, len(ops)+1, len(operators)) @@ -302,7 +302,7 @@ func TestHandleBlockEventsStream(t *testing.T) { // TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved // Check if the operator was removed successfully - //operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + //operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) //require.NoError(t, err) //require.Equal(t, len(ops), len(operators)) }) @@ -980,7 +980,7 @@ func TestHandleBlockEventsStream(t *testing.T) { t.Run("test OperatorAdded + OperatorRemoved events handling", func(t *testing.T) { // There are 5 ops before the test running // Check that there is no registered operators - operators, err := eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, operatorsCount, uint64(len(operators))) @@ -990,7 +990,7 @@ func TestHandleBlockEventsStream(t *testing.T) { op := tmpOps[0] // Call the RegisterOperator contract method - packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.pub) + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) require.NoError(t, err) _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000)) require.NoError(t, err) @@ -1020,7 +1020,7 @@ func TestHandleBlockEventsStream(t *testing.T) { // #TODO: Fails until we fix the OperatorAdded: handlers.go #108 // Check storage for the new operators - //operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + //operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) //require.NoError(t, err) //require.Equal(t, operatorsCount-1, uint64(len(operators))) // @@ -1254,7 +1254,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, ne func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *testOperator) (operatorstorage.Storage, *registrystorage.OperatorData) { if operator == nil { - logger.Fatal("empty test operator was passed", zap.Error(fmt.Errorf("empty test operator was passed"))) + logger.Fatal("empty test operator was passed") } nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) @@ -1262,9 +1262,9 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *test logger.Fatal("failed to create node storage", zap.Error(err)) } - operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.priv)) + operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.rsaPriv)) if err != nil { - 
logger.Fatal("could not setup operator private key", zap.Error(err)) + logger.Fatal("couldn't setup operator private key", zap.Error(err)) } _, found, err := nodeStorage.GetPrivateKey() @@ -1275,7 +1275,7 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *test operatorData, found, err = nodeStorage.GetOperatorDataByPubKey(nil, operatorPubKey) if err != nil { - logger.Fatal("could not get operator data by public key", zap.Error(err)) + logger.Fatal("couldn't get operator data by public key", zap.Error(err)) } if !found { operatorData = ®istrystorage.OperatorData{ @@ -1306,15 +1306,13 @@ func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBack } return simulator.NewSimulatedBackend( - genesis, 10000000, + genesis, 50_000_000, ) } func TestCreatingSharesData(t *testing.T) { - owner := testAddr nonce := 0 - // ops, err := createOperators(4, 1) require.NoError(t, err) @@ -1340,7 +1338,7 @@ func TestCreatingSharesData(t *testing.T) { encryptedKeys := splitBytes(sharesData[pubKeysOffset:], len(sharesData[pubKeysOffset:])/operatorCount) for i, enck := range encryptedKeys { - priv, err := rsaencryption.ConvertPemToPrivateKey(string(ops[i].priv)) + priv, err := rsaencryption.ConvertPemToPrivateKey(string(ops[i].rsaPriv)) require.NoError(t, err) decryptedSharePrivateKey, err := rsaencryption.DecodeKey(priv, enck) require.NoError(t, err) @@ -1361,9 +1359,9 @@ type testValidatorData struct { } type testOperator struct { - id uint64 - pub []byte // rsa pub - priv []byte // rsa sk + id uint64 + rsaPub []byte + rsaPriv []byte } type testShare struct { @@ -1386,19 +1384,18 @@ func createNewValidator(ops []*testOperator) (*testValidatorData, error) { sharesCount := uint64(len(ops)) threshold.Init() - msk, pubk := blskeygen.GenBLSKeyPair() + msk, mpk := blskeygen.GenBLSKeyPair() secVec := msk.GetMasterSecretKey(int(sharesCount)) - pubks := bls.GetMasterPublicKey(secVec) + pubKeys := bls.GetMasterPublicKey(secVec) splitKeys, err := threshold.Create(msk.Serialize(), sharesCount-1, sharesCount) if err != nil { return nil, err } - num := uint64(len(ops)) - validatorData.operatorsShares = make([]*testShare, num) + validatorData.operatorsShares = make([]*testShare, sharesCount) - // derive a `hareCount` number of shares - for i := uint64(1); i <= num; i++ { + // derive a `sharesCount` number of shares + for i := uint64(1); i <= sharesCount; i++ { validatorData.operatorsShares[i-1] = &testShare{ opId: i, sec: splitKeys[i], @@ -1407,54 +1404,54 @@ func createNewValidator(ops []*testOperator) (*testValidatorData, error) { } validatorData.masterKey = msk - validatorData.masterPubKey = pubk - validatorData.masterPublicKeys = pubks + validatorData.masterPubKey = mpk + validatorData.masterPublicKeys = pubKeys return validatorData, nil } func createOperators(num uint64, idOffset uint64) ([]*testOperator, error) { - testops := make([]*testOperator, num) + testOps := make([]*testOperator, num) for i := uint64(1); i <= num; i++ { pb, sk, err := rsaencryption.GenerateKeys() if err != nil { return nil, err } - testops[i-1] = &testOperator{ - id: idOffset + i, - pub: pb, - priv: sk, + testOps[i-1] = &testOperator{ + id: idOffset + i, + rsaPub: pb, + rsaPriv: sk, } } - return testops, nil + return testOps, nil } func generateSharesData(validatorData *testValidatorData, operators []*testOperator, owner ethcommon.Address, nonce int) ([]byte, error) { - var pubkeys []byte + var pubKeys []byte var encryptedShares []byte for i, op := range operators { - rsakey, err := 
rsaencryption.ConvertPemToPublicKey(op.pub) + rsaKey, err := rsaencryption.ConvertPemToPublicKey(op.rsaPub) if err != nil { - return nil, fmt.Errorf("cant convert publickey: %w", err) + return nil, fmt.Errorf("can't convert public key: %w", err) } - rawshare := validatorData.operatorsShares[i].sec.SerializeToHexStr() - ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, rsakey, []byte(rawshare)) + rawShare := validatorData.operatorsShares[i].sec.SerializeToHexStr() + cipherText, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(rawShare)) if err != nil { - return nil, errors.New("cant encrypt share") + return nil, fmt.Errorf("can't encrypt share: %w", err) } - rsapriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.priv)) + rsaPriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.rsaPriv)) if err != nil { - return nil, err + return nil, fmt.Errorf("can't convert secret key to a private key share: %w", err) } // check that we encrypt right shareSecret := &bls.SecretKey{} - decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsapriv, ciphertext) + decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsaPriv, cipherText) if err != nil { return nil, err } @@ -1462,21 +1459,21 @@ func generateSharesData(validatorData *testValidatorData, operators []*testOpera return nil, err } - pubkeys = append(pubkeys, validatorData.operatorsShares[i].pub.Serialize()...) - encryptedShares = append(encryptedShares, ciphertext...) + pubKeys = append(pubKeys, validatorData.operatorsShares[i].pub.Serialize()...) + encryptedShares = append(encryptedShares, cipherText...) } - tosign := fmt.Sprintf("%s:%d", owner.String(), nonce) - msghash := crypto.Keccak256([]byte(tosign)) - signed := validatorData.masterKey.Sign(string(msghash)) + toSign := fmt.Sprintf("%s:%d", owner.String(), nonce) + msgHash := crypto.Keccak256([]byte(toSign)) + signed := validatorData.masterKey.Sign(string(msgHash)) sig := signed.Serialize() - if !signed.VerifyByte(validatorData.masterPubKey, msghash) { - return nil, errors.New("couldn't sign correctly") + if !signed.VerifyByte(validatorData.masterPubKey, msgHash) { + return nil, errors.New("can't sign correctly") } - sharesData := append(pubkeys, encryptedShares...) + sharesData := append(pubKeys, encryptedShares...) sharesDataSigned := append(sig, sharesData...) return sharesDataSigned, nil From 090b237a3448252ce938b5e85636c754ea709307 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 10 Oct 2023 20:30:27 +0200 Subject: [PATCH 15/54] New slot ticker (#1149) * feat: new slot_ticker prototype * feat: replace old slot ticker with new one * chore: fix ticker tests * chore: review changes * resolved more review comments * add review changes * update after review * add more strict check on skipping test * deploy to 5--8 for testing * fix passing wrong args * add metrics for slot delay * change to histogram * trigger ci * update Grafana dashboard * add late duty log info * add log to warn of late duty execution * Revert "add late duty log info" This reverts commit 8cd5442a0f945c6ce3d1e72a3774f7e7a4c5bb23. * Revert "deploy to 5--8 for testing" This reverts commit e862fea24bcb7e2d904502b5275c7802a615f3f1.
--------- Co-authored-by: Matus Kysel Co-authored-by: moshe-blox --- beacon/goclient/goclient.go | 9 +- beacon/goclient/proposer.go | 13 +- cli/operator/node.go | 14 +- migrations/migration_2_encrypt_shares.go | 1 + monitoring/grafana/dashboard_ssv_node.json | 291 +++++++++++++++++---- networkconfig/config.go | 5 + operator/duties/attester.go | 3 +- operator/duties/base_handler.go | 10 +- operator/duties/base_handler_mock.go | 4 +- operator/duties/mocks/scheduler.go | 30 ++- operator/duties/proposer.go | 3 +- operator/duties/scheduler.go | 43 ++- operator/duties/scheduler_test.go | 87 ++++-- operator/duties/sync_committee.go | 3 +- operator/duties/validatorregistration.go | 5 +- operator/fee_recipient/controller.go | 59 +++-- operator/fee_recipient/controller_test.go | 66 +++-- operator/node.go | 23 +- operator/slot_ticker/mocks/ticker.go | 63 ----- operator/slot_ticker/slotticker.go | 88 ------- operator/slot_ticker/ticker.go | 84 ------ operator/slotticker/mocks/slotticker.go | 115 ++++++++ operator/slotticker/slotticker.go | 96 +++++++ operator/slotticker/slotticker_test.go | 179 +++++++++++++ 24 files changed, 883 insertions(+), 411 deletions(-) delete mode 100644 operator/slot_ticker/mocks/ticker.go delete mode 100644 operator/slot_ticker/slotticker.go delete mode 100644 operator/slot_ticker/ticker.go create mode 100644 operator/slotticker/mocks/slotticker.go create mode 100644 operator/slotticker/slotticker.go create mode 100644 operator/slotticker/slotticker_test.go diff --git a/beacon/goclient/goclient.go b/beacon/goclient/goclient.go index 8fe1216155..8448ba70ff 100644 --- a/beacon/goclient/goclient.go +++ b/beacon/goclient/goclient.go @@ -20,7 +20,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" ) @@ -147,7 +147,7 @@ type goClient struct { } // New init new client and go-client instance -func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.OperatorID, slotTicker slot_ticker.Ticker) (beaconprotocol.BeaconNode, error) { +func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.OperatorID, slotTickerProvider slotticker.Provider) (beaconprotocol.BeaconNode, error) { logger.Info("consensus client: connecting", fields.Address(opt.BeaconNodeAddr), fields.Network(string(opt.Network.BeaconNetwork))) httpClient, err := http.New(opt.Context, @@ -161,9 +161,6 @@ func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.Op return nil, errors.WithMessage(err, "failed to create http client") } - tickerChan := make(chan phase0.Slot, 32) - slotTicker.Subscribe(tickerChan) - client := &goClient{ log: logger, ctx: opt.Context, @@ -190,7 +187,7 @@ func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.Op ) // Start registration submitter. 
- go client.registrationSubmitter(tickerChan) + go client.registrationSubmitter(slotTickerProvider) return client, nil } diff --git a/beacon/goclient/proposer.go b/beacon/goclient/proposer.go index cb48d5e33c..38d7f4f565 100644 --- a/beacon/goclient/proposer.go +++ b/beacon/goclient/proposer.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/slotticker" ) const ( @@ -230,9 +231,15 @@ func (gc *goClient) createValidatorRegistration(pubkey []byte, feeRecipient bell return signedReg } -func (gc *goClient) registrationSubmitter(slots <-chan phase0.Slot) { - for currentSlot := range slots { - gc.submitRegistrationsFromCache(currentSlot) +func (gc *goClient) registrationSubmitter(slotTickerProvider slotticker.Provider) { + ticker := slotTickerProvider() + for { + select { + case <-gc.ctx.Done(): + return + case <-ticker.Next(): + gc.submitRegistrationsFromCache(ticker.Slot()) + } } } diff --git a/cli/operator/node.go b/cli/operator/node.go index d280bd662e..9c20e2fda0 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -43,7 +43,7 @@ import ( "github.com/bloxapp/ssv/nodeprobe" "github.com/bloxapp/ssv/operator" "github.com/bloxapp/ssv/operator/duties/dutystore" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" "github.com/bloxapp/ssv/operator/validatorsmap" @@ -137,14 +137,16 @@ var StartNodeCmd = &cobra.Command{ return currentEpoch >= cfg.P2pNetworkConfig.PermissionedActivateEpoch && currentEpoch < cfg.P2pNetworkConfig.PermissionedDeactivateEpoch } - slotTicker := slot_ticker.NewTicker(cmd.Context(), networkConfig) + slotTickerProvider := func() slotticker.SlotTicker { + return slotticker.New(networkConfig) + } cfg.ConsensusClient.Context = cmd.Context() cfg.ConsensusClient.Graffiti = []byte("SSV.Network") cfg.ConsensusClient.GasLimit = spectypes.DefaultGasLimit cfg.ConsensusClient.Network = networkConfig.Beacon.GetNetwork() - consensusClient := setupConsensusClient(logger, operatorData.ID, slotTicker) + consensusClient := setupConsensusClient(logger, operatorData.ID, slotTickerProvider) executionClient, err := executionclient.New( cmd.Context(), @@ -240,7 +242,7 @@ var StartNodeCmd = &cobra.Command{ validatorCtrl := validator.NewController(logger, cfg.SSVOptions.ValidatorOptions) cfg.SSVOptions.ValidatorController = validatorCtrl - operatorNode = operator.New(logger, cfg.SSVOptions, slotTicker) + operatorNode = operator.New(logger, cfg.SSVOptions, slotTickerProvider) if cfg.MetricsAPIPort > 0 { go startMetricsHandler(cmd.Context(), logger, db, metricsReporter, cfg.MetricsAPIPort, cfg.EnableProfile) @@ -517,9 +519,9 @@ func setupP2P(logger *zap.Logger, db basedb.Database) network.P2PNetwork { func setupConsensusClient( logger *zap.Logger, operatorID spectypes.OperatorID, - slotTicker slot_ticker.Ticker, + slotTickerProvider slotticker.Provider, ) beaconprotocol.BeaconNode { - cl, err := goclient.New(logger, cfg.ConsensusClient, operatorID, slotTicker) + cl, err := goclient.New(logger, cfg.ConsensusClient, operatorID, slotTickerProvider) if err != nil { logger.Fatal("failed to create beacon go-client", zap.Error(err), fields.Address(cfg.ConsensusClient.BeaconNodeAddr)) diff --git a/migrations/migration_2_encrypt_shares.go b/migrations/migration_2_encrypt_shares.go index 4ca0eb62c7..03c40a301d 100644 --- a/migrations/migration_2_encrypt_shares.go +++ 
b/migrations/migration_2_encrypt_shares.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "crypto/x509" "fmt" + "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/storage/basedb" diff --git a/monitoring/grafana/dashboard_ssv_node.json b/monitoring/grafana/dashboard_ssv_node.json index d5568f9de2..47150acf91 100644 --- a/monitoring/grafana/dashboard_ssv_node.json +++ b/monitoring/grafana/dashboard_ssv_node.json @@ -22,7 +22,7 @@ "fiscalYearStartMonth": 0, "graphTooltip": 0, "id": 115, - "iteration": 1676023992743, + "iteration": 1696933836051, "links": [], "liveNow": false, "panels": [ @@ -2685,8 +2685,245 @@ "title": "Stream Protocols (time-series)", "transformations": [], "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 81 + }, + "id": 67, + "panels": [], + "title": "Vitals", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "Rate per second of requests, responses and active requests.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive are outgoing requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#73BF69", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "10ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "20ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "100ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5000ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 82 + }, + "id": 65, + "maxDataPoints": 25, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.3.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5ms", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"10.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5.0\"}[5m]))", + "format": "time_series", + "hide": false, 
+ "instant": false, + "interval": "", + "legendFormat": "10ms", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"20.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"10.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "20ms", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"100.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"20.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "100ms", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"500.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"100.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "500ms", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5000.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"500.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5000ms", + "refId": "F" + } + ], + "title": "Duty Execution Latency (5m)", + "transformations": [], + "type": "timeseries" } ], + "refresh": "", "schemaVersion": 34, "style": "dark", "tags": [], @@ -2695,8 +2932,8 @@ { "current": { "selected": false, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" + "text": "ssv-node-v2-6", + "value": "ssv-node-v2-6" }, "description": "", "hide": 1, @@ -2730,7 +2967,7 @@ "value": "ssv-node-v2-5" }, { - "selected": false, + "selected": true, "text": "ssv-node-v2-6", "value": "ssv-node-v2-6" }, @@ -2744,26 +2981,6 @@ "text": "ssv-node-v2-8", "value": "ssv-node-v2-8" }, - { - "selected": false, - "text": "ssv-node-9", - "value": "ssv-node-9" - }, - { - "selected": false, - "text": "ssv-node-10", - "value": "ssv-node-10" - }, - { - "selected": false, - "text": "ssv-node-11", - "value": "ssv-node-11" - }, - { - "selected": false, - "text": "ssv-node-12", - "value": "ssv-node-12" - }, { "selected": false, "text": "ssv-exporter", @@ -2773,29 +2990,9 @@ "selected": false, "text": "ssv-exporter-v2", "value": "ssv-exporter-v2" - }, - { - "selected": true, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" - }, - { - "selected": false, - "text": "ssv-node-v3-2", - "value": "ssv-node-v3-2" - }, - { - "selected": false, - "text": "ssv-node-v3-3", - "value": "ssv-node-v3-3" - }, - { - "selected": false, - "text": "ssv-node-v3-4", - "value": "ssv-node-v3-4" } ], - "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-node-9,ssv-node-10,ssv-node-11,ssv-node-12,ssv-exporter,ssv-exporter-v2,ssv-node-v3-1,ssv-node-v3-2,ssv-node-v3-3,ssv-node-v3-4", + "query": 
"ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-exporter,ssv-exporter-v2", "queryValue": "", "skipUrlSync": false, "type": "custom" @@ -2803,13 +3000,13 @@ ] }, "time": { - "from": "now-6h", + "from": "now-1h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Node Dashboard", "uid": "QNiMrdoVz", - "version": 59, + "version": 70, "weekStart": "" } \ No newline at end of file diff --git a/networkconfig/config.go b/networkconfig/config.go index de65d48fe4..5a43b9fdc8 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -61,3 +61,8 @@ func (n NetworkConfig) SlotDurationSec() time.Duration { func (n NetworkConfig) SlotsPerEpoch() uint64 { return n.Beacon.SlotsPerEpoch() } + +// GetGenesisTime returns the genesis time in unix time. +func (n NetworkConfig) GetGenesisTime() time.Time { + return time.Unix(int64(n.Beacon.MinGenesisTime()), 0) +} diff --git a/operator/duties/attester.go b/operator/duties/attester.go index f5ab6b4b0a..9f3c1c1283 100644 --- a/operator/duties/attester.go +++ b/operator/duties/attester.go @@ -70,7 +70,8 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_seq", buildStr)) diff --git a/operator/duties/base_handler.go b/operator/duties/base_handler.go index f2a3fe5722..7e26de30b3 100644 --- a/operator/duties/base_handler.go +++ b/operator/duties/base_handler.go @@ -3,11 +3,11 @@ package duties import ( "context" - "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "go.uber.org/zap" "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/slotticker" ) //go:generate mockgen -package=duties -destination=./base_handler_mock.go -source=./base_handler.go @@ -16,7 +16,7 @@ import ( type ExecuteDutiesFunc func(logger *zap.Logger, duties []*spectypes.Duty) type dutyHandler interface { - Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, chan phase0.Slot, chan ReorgEvent, chan struct{}) + Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, slotticker.Provider, chan ReorgEvent, chan struct{}) HandleDuties(context.Context) Name() string } @@ -27,7 +27,7 @@ type baseHandler struct { network networkconfig.NetworkConfig validatorController ValidatorController executeDuties ExecuteDutiesFunc - ticker chan phase0.Slot + ticker slotticker.SlotTicker reorg chan ReorgEvent indicesChange chan struct{} @@ -43,7 +43,7 @@ func (h *baseHandler) Setup( network networkconfig.NetworkConfig, validatorController ValidatorController, executeDuties ExecuteDutiesFunc, - ticker chan phase0.Slot, + slotTickerProvider slotticker.Provider, reorgEvents chan ReorgEvent, indicesChange chan struct{}, ) { @@ -52,7 +52,7 @@ func (h *baseHandler) Setup( h.network = network h.validatorController = validatorController h.executeDuties = executeDuties - h.ticker = ticker + h.ticker = slotTickerProvider() h.reorg = reorgEvents h.indicesChange = indicesChange } diff --git a/operator/duties/base_handler_mock.go b/operator/duties/base_handler_mock.go index 801ca2dc8c..6177f369f3 100644 --- a/operator/duties/base_handler_mock.go +++ 
b/operator/duties/base_handler_mock.go @@ -8,8 +8,8 @@ import ( context "context" reflect "reflect" - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" networkconfig "github.com/bloxapp/ssv/networkconfig" + slotticker "github.com/bloxapp/ssv/operator/slotticker" gomock "github.com/golang/mock/gomock" zap "go.uber.org/zap" ) @@ -64,7 +64,7 @@ func (mr *MockdutyHandlerMockRecorder) Name() *gomock.Call { } // Setup mocks base method. -func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 networkconfig.NetworkConfig, arg4 ValidatorController, arg5 ExecuteDutiesFunc, arg6 chan phase0.Slot, arg7 chan ReorgEvent, arg8 chan struct{}) { +func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 networkconfig.NetworkConfig, arg4 ValidatorController, arg5 ExecuteDutiesFunc, arg6 slotticker.Provider, arg7 chan ReorgEvent, arg8 chan struct{}) { m.ctrl.T.Helper() m.ctrl.Call(m, "Setup", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } diff --git a/operator/duties/mocks/scheduler.go b/operator/duties/mocks/scheduler.go index de1a092c05..e9e6c8026f 100644 --- a/operator/duties/mocks/scheduler.go +++ b/operator/duties/mocks/scheduler.go @@ -7,13 +7,13 @@ package mocks import ( context "context" reflect "reflect" + time "time" client "github.com/attestantio/go-eth2-client" v1 "github.com/attestantio/go-eth2-client/api/v1" phase0 "github.com/attestantio/go-eth2-client/spec/phase0" types "github.com/bloxapp/ssv/protocol/v2/types" gomock "github.com/golang/mock/gomock" - event "github.com/prysmaticlabs/prysm/v4/async/event" ) // MockSlotTicker is a mock of SlotTicker interface. @@ -39,18 +39,32 @@ func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { return m.recorder } -// Subscribe mocks base method. -func (m *MockSlotTicker) Subscribe(subscription chan phase0.Slot) event.Subscription { +// Next mocks base method. +func (m *MockSlotTicker) Next() <-chan time.Time { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subscribe", subscription) - ret0, _ := ret[0].(event.Subscription) + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(<-chan time.Time) return ret0 } -// Subscribe indicates an expected call of Subscribe. -func (mr *MockSlotTickerMockRecorder) Subscribe(subscription interface{}) *gomock.Call { +// Next indicates an expected call of Next. +func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockSlotTicker)(nil).Subscribe), subscription) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) +} + +// Slot mocks base method. +func (m *MockSlotTicker) Slot() phase0.Slot { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Slot") + ret0, _ := ret[0].(phase0.Slot) + return ret0 +} + +// Slot indicates an expected call of Slot. +func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) } // MockBeaconNode is a mock of BeaconNode interface. 
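Note: the duty handlers in the diffs that follow (proposer, attester, sync committee, validator registration) and the fee recipient controller all migrate from a subscribed chan phase0.Slot to this SlotTicker interface. A minimal sketch of the consumption pattern, assuming the slotticker.Provider type this patch introduces; consumeSlots and handleSlot are hypothetical names standing in for a handler's per-slot loop:

import (
	"context"

	"github.com/attestantio/go-eth2-client/spec/phase0"

	"github.com/bloxapp/ssv/operator/slotticker"
)

// consumeSlots drives a handler from the new ticker: each consumer obtains
// its own SlotTicker from the provider, waits on Next() for the slot tick,
// and reads the corresponding slot number via Slot().
func consumeSlots(ctx context.Context, provider slotticker.Provider, handleSlot func(phase0.Slot)) {
	ticker := provider() // hypothetical consumer; each one owns an independent ticker
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.Next():
			handleSlot(ticker.Slot())
		}
	}
}

The provider indirection is why the scheduler below no longer fans out a shared subscription: each handler constructs its own ticker, so a slow consumer cannot starve the others.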
diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go index ffb52d42e0..89e96cca94 100644 --- a/operator/duties/proposer.go +++ b/operator/duties/proposer.go @@ -59,7 +59,8 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_seq", buildStr)) diff --git a/operator/duties/scheduler.go b/operator/duties/scheduler.go index e53f29bfab..0ee6979ff8 100644 --- a/operator/duties/scheduler.go +++ b/operator/duties/scheduler.go @@ -11,6 +11,8 @@ import ( eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/sourcegraph/conc/pool" "go.uber.org/zap" @@ -20,11 +22,25 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/slotticker" "github.com/bloxapp/ssv/protocol/v2/types" ) //go:generate mockgen -package=mocks -destination=./mocks/scheduler.go -source=./scheduler.go +var slotDelayHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Name: "slot_ticker_delay_milliseconds", + Help: "The delay in milliseconds of the slot ticker", + Buckets: []float64{5, 10, 20, 100, 500, 5000}, // Buckets in milliseconds. Adjust as per your needs. +}) + +func init() { + logger := zap.L() + if err := prometheus.Register(slotDelayHistogram); err != nil { + logger.Debug("could not register prometheus collector") + } +} + const ( // blockPropagationDelay time to propagate around the nodes // before kicking off duties for the block's slot. 
@@ -32,7 +48,8 @@ const ( ) type SlotTicker interface { - Subscribe(subscription chan phase0.Slot) event.Subscription + Next() <-chan time.Time + Slot() phase0.Slot } type BeaconNode interface { @@ -60,7 +77,7 @@ type SchedulerOptions struct { ValidatorController ValidatorController ExecuteDuty ExecuteDutyFunc IndicesChg chan struct{} - Ticker SlotTicker + SlotTickerProvider slotticker.Provider BuilderProposals bool DutyStore *dutystore.Store } @@ -69,7 +86,7 @@ type Scheduler struct { beaconNode BeaconNode network networkconfig.NetworkConfig validatorController ValidatorController - slotTicker SlotTicker + slotTickerProvider slotticker.Provider executeDuty ExecuteDutyFunc builderProposals bool @@ -78,7 +95,7 @@ type Scheduler struct { reorg chan ReorgEvent indicesChg chan struct{} - ticker chan phase0.Slot + ticker slotticker.SlotTicker waitCond *sync.Cond pool *pool.ContextPool @@ -97,7 +114,7 @@ func NewScheduler(opts *SchedulerOptions) *Scheduler { s := &Scheduler{ beaconNode: opts.BeaconNode, network: opts.Network, - slotTicker: opts.Ticker, + slotTickerProvider: opts.SlotTickerProvider, executeDuty: opts.ExecuteDuty, validatorController: opts.ValidatorController, builderProposals: opts.BuilderProposals, @@ -110,7 +127,7 @@ func NewScheduler(opts *SchedulerOptions) *Scheduler { NewSyncCommitteeHandler(dutyStore.SyncCommittee), }, - ticker: make(chan phase0.Slot), + ticker: opts.SlotTickerProvider(), reorg: make(chan ReorgEvent), waitCond: sync.NewCond(&sync.Mutex{}), } @@ -143,8 +160,6 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { for _, handler := range s.handlers { handler := handler - slotTicker := make(chan phase0.Slot) - s.slotTicker.Subscribe(slotTicker) indicesChangeCh := make(chan struct{}) indicesChangeFeed.Subscribe(indicesChangeCh) @@ -158,7 +173,7 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { s.network, s.validatorController, s.ExecuteDuties, - slotTicker, + s.slotTickerProvider, reorgCh, indicesChangeCh, ) @@ -170,7 +185,6 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { }) } - s.slotTicker.Subscribe(s.ticker) go s.SlotTicker(ctx) go indicesChangeFeed.FanOut(ctx, s.indicesChg) @@ -222,7 +236,9 @@ func (s *Scheduler) SlotTicker(ctx context.Context) { select { case <-ctx.Done(): return - case slot := <-s.ticker: + case <-s.ticker.Next(): + slot := s.ticker.Slot() + delay := s.network.SlotDurationSec() / time.Duration(goclient.IntervalsPerSlot) /* a third of the slot duration */ finalTime := s.network.Beacon.GetSlotStartTime(slot).Add(delay) waitDuration := time.Until(finalTime) @@ -330,6 +346,11 @@ func (s *Scheduler) ExecuteDuties(logger *zap.Logger, duties []*spectypes.Duty) for _, duty := range duties { duty := duty logger := s.loggerWithDutyContext(logger, duty) + slotDelay := time.Since(s.network.Beacon.GetSlotStartTime(duty.Slot)) + if slotDelay >= 100*time.Millisecond { + logger.Debug("⚠️ late duty execution", zap.Int64("slot_delay", slotDelay.Milliseconds())) + } + slotDelayHistogram.Observe(float64(slotDelay.Milliseconds())) go func() { if duty.Type == spectypes.BNRoleAttester || duty.Type == spectypes.BNRoleSyncCommittee { s.waitOneThirdOrValidBlock(duty.Slot) diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index 342ba9e0cd..3a98de7e7c 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -17,35 +17,80 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/networkconfig" 
"github.com/bloxapp/ssv/operator/duties/mocks" - mockslotticker "github.com/bloxapp/ssv/operator/slot_ticker/mocks" + "github.com/bloxapp/ssv/operator/slotticker" + mockslotticker "github.com/bloxapp/ssv/operator/slotticker/mocks" mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" ) +type MockSlotTicker interface { + Next() <-chan time.Time + Slot() phase0.Slot + Subscribe() chan phase0.Slot +} + type mockSlotTicker struct { - event.Feed + slotChan chan phase0.Slot + timeChan chan time.Time + slot phase0.Slot + mu sync.Mutex +} + +func NewMockSlotTicker() MockSlotTicker { + ticker := &mockSlotTicker{ + slotChan: make(chan phase0.Slot), + timeChan: make(chan time.Time), + } + ticker.start() + return ticker +} + +func (m *mockSlotTicker) start() { + go func() { + for slot := range m.slotChan { + m.mu.Lock() + m.slot = slot + m.mu.Unlock() + m.timeChan <- time.Now() + } + }() +} + +func (m *mockSlotTicker) Next() <-chan time.Time { + return m.timeChan +} + +func (m *mockSlotTicker) Slot() phase0.Slot { + m.mu.Lock() + defer m.mu.Unlock() + return m.slot +} + +func (m *mockSlotTicker) Subscribe() chan phase0.Slot { + return m.slotChan } -func (m *mockSlotTicker) Subscribe(subscriber chan phase0.Slot) event.Subscription { - return m.Feed.Subscribe(subscriber) +type mockSlotTickerService struct { + event.Feed } func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *SlotValue) ( *Scheduler, *zap.Logger, - *mockSlotTicker, + *mockSlotTickerService, time.Duration, context.CancelFunc, *pool.ContextPool, ) { ctrl := gomock.NewController(t) - timeout := 100 * time.Millisecond + // A 200ms timeout ensures the test passes, even with mockSlotTicker overhead. + timeout := 200 * time.Millisecond ctx, cancel := context.WithCancel(context.Background()) logger := logging.TestLogger(t) mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := &mockSlotTicker{} + mockSlotService := &mockSlotTickerService{} mockNetworkConfig := networkconfig.NetworkConfig{ Beacon: mocknetwork.NewMockBeaconNetwork(ctrl), } @@ -55,8 +100,12 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot BeaconNode: mockBeaconNode, Network: mockNetworkConfig, ValidatorController: mockValidatorController, - Ticker: mockTicker, - BuilderProposals: false, + SlotTickerProvider: func() slotticker.SlotTicker { + ticker := NewMockSlotTicker() + mockSlotService.Subscribe(ticker.Subscribe()) + return ticker + }, + BuilderProposals: false, } s := NewScheduler(opts) @@ -103,7 +152,7 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot return s.Wait() }) - return s, logger, mockTicker, timeout, cancel, schedulerPool + return s, logger, mockSlotService, timeout, cancel, schedulerPool } func setExecuteDutyFunc(s *Scheduler, executeDutiesCall chan []*spectypes.Duty, executeDutiesCallSize int) { @@ -199,7 +248,7 @@ func TestScheduler_Run(t *testing.T) { mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := mockslotticker.NewMockTicker(ctrl) + mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers mockDutyHandler1 := NewMockdutyHandler(ctrl) mockDutyHandler2 := NewMockdutyHandler(ctrl) @@ -209,8 +258,10 @@ func TestScheduler_Run(t *testing.T) { BeaconNode: mockBeaconNode, Network: networkconfig.TestNetwork, ValidatorController: mockValidatorController, - Ticker: mockTicker, 
BuilderProposals: false, + SlotTickerProvider: func() slotticker.SlotTicker { + return mockTicker + }, } s := NewScheduler(opts) @@ -218,7 +269,7 @@ func TestScheduler_Run(t *testing.T) { s.handlers = []dutyHandler{mockDutyHandler1, mockDutyHandler2} mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + mockTicker.EXPECT().Next().Return(nil).AnyTimes() // setup mock duty handler expectations for _, mockDutyHandler := range s.handlers { @@ -248,7 +299,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := mockslotticker.NewMockTicker(ctrl) + mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers opts := &SchedulerOptions{ @@ -256,8 +307,10 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { BeaconNode: mockBeaconNode, Network: networkconfig.TestNetwork, ValidatorController: mockValidatorController, - Ticker: mockTicker, - IndicesChg: make(chan struct{}), + SlotTickerProvider: func() slotticker.SlotTicker { + return mockTicker + }, + IndicesChg: make(chan struct{}), BuilderProposals: true, } @@ -267,7 +320,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { // add multiple mock duty handlers s.handlers = []dutyHandler{NewValidatorRegistrationHandler()} mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + mockTicker.EXPECT().Next().Return(nil).AnyTimes() err := s.Start(ctx, logger) require.NoError(t, err) diff --git a/operator/duties/sync_committee.go b/operator/duties/sync_committee.go index 7508c4012a..1d99e930a0 100644 --- a/operator/duties/sync_committee.go +++ b/operator/duties/sync_committee.go @@ -72,7 +72,8 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) period := h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) buildStr := fmt.Sprintf("p%v-%v-s%v-#%v", period, epoch, slot, slot%32+1) diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index cdc2a4b605..8a3771f5af 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -36,10 +36,11 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() shares := h.validatorController.GetOperatorShares() - sent := 0 + for _, share := range shares { if !share.HasBeaconMetadata() || !share.BeaconMetadata.IsAttesting() { continue diff --git a/operator/fee_recipient/controller.go b/operator/fee_recipient/controller.go index 477b40eed1..d44f20caca 100644 --- a/operator/fee_recipient/controller.go +++ b/operator/fee_recipient/controller.go @@ -10,7 +10,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/bloxapp/ssv/registry/storage" @@ -25,42 +25,40 @@ type 
RecipientController interface { // ControllerOptions holds the needed dependencies type ControllerOptions struct { - Ctx context.Context - BeaconClient beaconprotocol.BeaconNode - Network networkconfig.NetworkConfig - ShareStorage storage.Shares - RecipientStorage storage.Recipients - Ticker slot_ticker.Ticker - OperatorData *storage.OperatorData + Ctx context.Context + BeaconClient beaconprotocol.BeaconNode + Network networkconfig.NetworkConfig + ShareStorage storage.Shares + RecipientStorage storage.Recipients + SlotTickerProvider slotticker.Provider + OperatorData *storage.OperatorData } // recipientController implementation of RecipientController type recipientController struct { - ctx context.Context - beaconClient beaconprotocol.BeaconNode - network networkconfig.NetworkConfig - shareStorage storage.Shares - recipientStorage storage.Recipients - ticker slot_ticker.Ticker - operatorData *storage.OperatorData + ctx context.Context + beaconClient beaconprotocol.BeaconNode + network networkconfig.NetworkConfig + shareStorage storage.Shares + recipientStorage storage.Recipients + slotTickerProvider slotticker.Provider + operatorData *storage.OperatorData } func NewController(opts *ControllerOptions) *recipientController { return &recipientController{ - ctx: opts.Ctx, - beaconClient: opts.BeaconClient, - network: opts.Network, - shareStorage: opts.ShareStorage, - recipientStorage: opts.RecipientStorage, - ticker: opts.Ticker, - operatorData: opts.OperatorData, + ctx: opts.Ctx, + beaconClient: opts.BeaconClient, + network: opts.Network, + shareStorage: opts.ShareStorage, + recipientStorage: opts.RecipientStorage, + slotTickerProvider: opts.SlotTickerProvider, + operatorData: opts.OperatorData, } } func (rc *recipientController) Start(logger *zap.Logger) { - tickerChan := make(chan phase0.Slot, 32) - rc.ticker.Subscribe(tickerChan) - rc.listenToTicker(logger, tickerChan) + rc.listenToTicker(logger) } // listenToTicker loop over the given slot channel @@ -68,16 +66,19 @@ func (rc *recipientController) Start(logger *zap.Logger) { // in addition, submitting "same data" every slot is not efficient and can overload beacon node // instead we can subscribe to beacon node events and submit only when there is // a new fee recipient event (or new validator) was handled or when there is a syncing issue with beacon node -func (rc *recipientController) listenToTicker(logger *zap.Logger, slots chan phase0.Slot) { +func (rc *recipientController) listenToTicker(logger *zap.Logger) { firstTimeSubmitted := false - for currentSlot := range slots { + ticker := rc.slotTickerProvider() + for { + <-ticker.Next() + slot := ticker.Slot() // submit if first time or if first slot in epoch - if firstTimeSubmitted && uint64(currentSlot)%rc.network.SlotsPerEpoch() != (rc.network.SlotsPerEpoch()/2) { + if firstTimeSubmitted && uint64(slot)%rc.network.SlotsPerEpoch() != (rc.network.SlotsPerEpoch()/2) { continue } firstTimeSubmitted = true - err := rc.prepareAndSubmit(logger, currentSlot) + err := rc.prepareAndSubmit(logger, slot) if err != nil { logger.Warn("could not submit proposal preparations", zap.Error(err)) } diff --git a/operator/fee_recipient/controller_test.go b/operator/fee_recipient/controller_test.go index 02bf4144dd..6e1718afd6 100644 --- a/operator/fee_recipient/controller_test.go +++ b/operator/fee_recipient/controller_test.go @@ -13,13 +13,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/golang/mock/gomock" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/async/event" 
"github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/operator/slot_ticker/mocks" + "github.com/bloxapp/ssv/operator/slotticker" + "github.com/bloxapp/ssv/operator/slotticker/mocks" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -52,32 +52,47 @@ func TestSubmitProposal(t *testing.T) { t.Run("submit first time or halfway through epoch", func(t *testing.T) { numberOfRequests := 4 var wg sync.WaitGroup + wg.Add(numberOfRequests) // Set up the wait group before starting goroutines + client := beacon.NewMockBeaconNode(ctrl) client.EXPECT().SubmitProposalPreparation(gomock.Any()).DoAndReturn(func(feeRecipients map[phase0.ValidatorIndex]bellatrix.ExecutionAddress) error { wg.Done() return nil - }).MinTimes(numberOfRequests).MaxTimes(numberOfRequests) // call first time and on the halfway through epoch. each time should be 2 request as we have two batches + }).Times(numberOfRequests) - ticker := mocks.NewMockTicker(ctrl) - ticker.EXPECT().Subscribe(gomock.Any()).DoAndReturn(func(subscription chan phase0.Slot) event.Subscription { - subscription <- 1 // first time - time.Sleep(time.Millisecond * 500) - subscription <- 2 // should not call submit - time.Sleep(time.Millisecond * 500) - subscription <- 20 // should not call submit - time.Sleep(time.Millisecond * 500) - subscription <- phase0.Slot(network.SlotsPerEpoch()) / 2 // halfway through epoch - time.Sleep(time.Millisecond * 500) - subscription <- 63 // should not call submit - return nil - }) + ticker := mocks.NewMockSlotTicker(ctrl) + mockTimeChan := make(chan time.Time) + mockSlotChan := make(chan phase0.Slot) + ticker.EXPECT().Next().Return(mockTimeChan).AnyTimes() + ticker.EXPECT().Slot().DoAndReturn(func() phase0.Slot { + return <-mockSlotChan + }).AnyTimes() frCtrl.beaconClient = client - frCtrl.ticker = ticker + frCtrl.slotTickerProvider = func() slotticker.SlotTicker { + return ticker + } go frCtrl.Start(logger) - wg.Add(numberOfRequests) + + slots := []phase0.Slot{ + 1, // first time + 2, // should not call submit + 20, // should not call submit + phase0.Slot(network.SlotsPerEpoch()) / 2, // halfway through epoch + 63, // should not call submit + } + + for _, s := range slots { + mockTimeChan <- time.Now() + mockSlotChan <- s + time.Sleep(time.Millisecond * 500) + } + wg.Wait() + + close(mockTimeChan) // Close the channel after test + close(mockSlotChan) }) t.Run("error handling", func(t *testing.T) { @@ -88,18 +103,21 @@ func TestSubmitProposal(t *testing.T) { return errors.New("failed to submit") }).MinTimes(2).MaxTimes(2) - ticker := mocks.NewMockTicker(ctrl) - ticker.EXPECT().Subscribe(gomock.Any()).DoAndReturn(func(subscription chan phase0.Slot) event.Subscription { - subscription <- 100 // first time - return nil - }) + ticker := mocks.NewMockSlotTicker(ctrl) + mockTimeChan := make(chan time.Time, 1) + ticker.EXPECT().Next().Return(mockTimeChan).AnyTimes() + ticker.EXPECT().Slot().Return(phase0.Slot(100)).AnyTimes() frCtrl.beaconClient = client - frCtrl.ticker = ticker + frCtrl.slotTickerProvider = func() slotticker.SlotTicker { + return ticker + } go frCtrl.Start(logger) + mockTimeChan <- time.Now() wg.Add(2) wg.Wait() + close(mockTimeChan) }) } diff --git a/operator/node.go b/operator/node.go index 8c3c98c959..746f2ae494 100644 --- a/operator/node.go +++ b/operator/node.go @@ -17,7 +17,7 @@ import ( 
"github.com/bloxapp/ssv/operator/duties" "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/fee_recipient" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" @@ -51,7 +51,6 @@ type Options struct { type operatorNode struct { network networkconfig.NetworkConfig context context.Context - ticker slot_ticker.Ticker validatorsCtrl validator.Controller consensusClient beaconprotocol.BeaconNode executionClient *executionclient.ExecutionClient @@ -68,7 +67,7 @@ type operatorNode struct { } // New is the constructor of operatorNode -func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { +func New(logger *zap.Logger, opts Options, slotTickerProvider slotticker.Provider) Node { storageMap := qbftstorage.NewStores() roles := []spectypes.BeaconRole{ @@ -85,7 +84,6 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { node := &operatorNode{ context: opts.Context, - ticker: slotTicker, validatorsCtrl: opts.ValidatorController, network: opts.Network, consensusClient: opts.BeaconNode, @@ -100,18 +98,18 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { ValidatorController: opts.ValidatorController, IndicesChg: opts.ValidatorController.IndicesChangeChan(), ExecuteDuty: opts.ValidatorController.ExecuteDuty, - Ticker: slotTicker, BuilderProposals: opts.ValidatorOptions.BuilderProposals, DutyStore: opts.DutyStore, + SlotTickerProvider: slotTickerProvider, }), feeRecipientCtrl: fee_recipient.NewController(&fee_recipient.ControllerOptions{ - Ctx: opts.Context, - BeaconClient: opts.BeaconNode, - Network: opts.Network, - ShareStorage: opts.ValidatorOptions.RegistryStorage.Shares(), - RecipientStorage: opts.ValidatorOptions.RegistryStorage, - Ticker: slotTicker, - OperatorData: opts.ValidatorOptions.OperatorData, + Ctx: opts.Context, + BeaconClient: opts.BeaconNode, + Network: opts.Network, + ShareStorage: opts.ValidatorOptions.RegistryStorage.Shares(), + RecipientStorage: opts.ValidatorOptions.RegistryStorage, + OperatorData: opts.ValidatorOptions.OperatorData, + SlotTickerProvider: slotTickerProvider, }), ws: opts.WS, @@ -141,7 +139,6 @@ func (n *operatorNode) Start(logger *zap.Logger) error { } }() - go n.ticker.Start(logger) n.validatorsCtrl.StartNetworkHandlers() n.validatorsCtrl.StartValidators() go n.net.UpdateSubnets(logger) diff --git a/operator/slot_ticker/mocks/ticker.go b/operator/slot_ticker/mocks/ticker.go deleted file mode 100644 index 2ed11c9fb9..0000000000 --- a/operator/slot_ticker/mocks/ticker.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./ticker.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" - gomock "github.com/golang/mock/gomock" - event "github.com/prysmaticlabs/prysm/v4/async/event" - zap "go.uber.org/zap" -) - -// MockTicker is a mock of Ticker interface. -type MockTicker struct { - ctrl *gomock.Controller - recorder *MockTickerMockRecorder -} - -// MockTickerMockRecorder is the mock recorder for MockTicker. -type MockTickerMockRecorder struct { - mock *MockTicker -} - -// NewMockTicker creates a new mock instance. 
-func NewMockTicker(ctrl *gomock.Controller) *MockTicker { - mock := &MockTicker{ctrl: ctrl} - mock.recorder = &MockTickerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockTicker) EXPECT() *MockTickerMockRecorder { - return m.recorder -} - -// Start mocks base method. -func (m *MockTicker) Start(logger *zap.Logger) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start", logger) -} - -// Start indicates an expected call of Start. -func (mr *MockTickerMockRecorder) Start(logger interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockTicker)(nil).Start), logger) -} - -// Subscribe mocks base method. -func (m *MockTicker) Subscribe(subscription chan phase0.Slot) event.Subscription { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subscribe", subscription) - ret0, _ := ret[0].(event.Subscription) - return ret0 -} - -// Subscribe indicates an expected call of Subscribe. -func (mr *MockTickerMockRecorder) Subscribe(subscription interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockTicker)(nil).Subscribe), subscription) -} diff --git a/operator/slot_ticker/slotticker.go b/operator/slot_ticker/slotticker.go deleted file mode 100644 index dbb1fc033e..0000000000 --- a/operator/slot_ticker/slotticker.go +++ /dev/null @@ -1,88 +0,0 @@ -package slot_ticker - -import ( - "time" - - "github.com/attestantio/go-eth2-client/spec/phase0" -) - -// The TTicker interface defines a type which can expose a -// receive-only channel firing slot events. -type TTicker interface { - C() <-chan phase0.Slot - Done() -} - -// SlotTicker is a special ticker for the beacon chain block. -// The channel emits over the slot interval, and ensures that -// the ticks are in line with the genesis time. This means that -// the duration between the ticks and the genesis time are always a -// multiple of the slot duration. -// In addition, the channel returns the new slot number. -type SlotTicker struct { - c chan phase0.Slot - done chan struct{} -} - -// C returns the ticker channel. Call Cancel afterwards to ensure -// that the goroutine exits cleanly. -func (s *SlotTicker) C() <-chan phase0.Slot { - return s.c -} - -// Done should be called to clean up the ticker. -func (s *SlotTicker) Done() { - go func() { - s.done <- struct{}{} - }() -} - -// NewSlotTicker starts and returns a new SlotTicker instance. -func NewSlotTicker(genesisTime time.Time, secondsPerSlot uint64) *SlotTicker { - if genesisTime.IsZero() { - panic("zero genesis time") - } - ticker := &SlotTicker{ - c: make(chan phase0.Slot), - done: make(chan struct{}), - } - ticker.start(genesisTime, secondsPerSlot, time.Since, time.Until, time.After) - return ticker -} - -func (s *SlotTicker) start( - genesisTime time.Time, - secondsPerSlot uint64, - since, until func(time.Time) time.Duration, - after func(time.Duration) <-chan time.Time) { - - d := time.Duration(secondsPerSlot) * time.Second - - go func() { - sinceGenesis := since(genesisTime) - - var nextTickTime time.Time - var slot phase0.Slot - if sinceGenesis < d { - // Handle when the current time is before the genesis time. 
- nextTickTime = genesisTime - slot = 0 - } else { - nextTick := sinceGenesis.Truncate(d) + d - nextTickTime = genesisTime.Add(nextTick) - slot = phase0.Slot(nextTick / d) - } - - for { - waitTime := until(nextTickTime) - select { - case <-after(waitTime): - s.c <- slot - slot++ - nextTickTime = nextTickTime.Add(d) - case <-s.done: - return - } - } - }() -} diff --git a/operator/slot_ticker/ticker.go b/operator/slot_ticker/ticker.go deleted file mode 100644 index 06cbe39604..0000000000 --- a/operator/slot_ticker/ticker.go +++ /dev/null @@ -1,84 +0,0 @@ -package slot_ticker - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/prysmaticlabs/prysm/v4/async/event" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/networkconfig" -) - -//go:generate mockgen -package=mocks -destination=./mocks/ticker.go -source=./ticker.go - -type Ticker interface { - // Start ticker process - Start(logger *zap.Logger) - // Subscribe to ticker chan - Subscribe(subscription chan phase0.Slot) event.Subscription -} - -type ticker struct { - ctx context.Context - network networkconfig.NetworkConfig - - // chan - feed *event.Feed -} - -// NewTicker returns Ticker struct pointer -func NewTicker(ctx context.Context, network networkconfig.NetworkConfig) Ticker { - return &ticker{ - ctx: ctx, - network: network, - feed: &event.Feed{}, - } -} - -// Start slot ticker -func (t *ticker) Start(logger *zap.Logger) { - genesisTime := time.Unix(int64(t.network.Beacon.MinGenesisTime()), 0) - slotTicker := NewSlotTicker(genesisTime, uint64(t.network.SlotDurationSec().Seconds())) - t.listenToTicker(logger, slotTicker.C()) -} - -// Subscribe will trigger every slot -func (t *ticker) Subscribe(subscription chan phase0.Slot) event.Subscription { - return t.feed.Subscribe(subscription) -} - -// listenToTicker loop over the given slot channel -func (t *ticker) listenToTicker(logger *zap.Logger, slots <-chan phase0.Slot) { - for currentSlot := range slots { - currentEpoch := t.network.Beacon.EstimatedEpochAtSlot(currentSlot) - buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, currentSlot, currentSlot%32+1) - logger.Debug("📅 slot ticker", zap.String("epoch_slot_seq", buildStr)) - if !t.genesisEpochEffective(logger) { - continue - } - // notify current slot to channel - _ = t.feed.Send(currentSlot) - } -} - -func (t *ticker) genesisEpochEffective(logger *zap.Logger) bool { - curSlot := t.network.Beacon.EstimatedCurrentSlot() - genSlot := t.network.Beacon.GetEpochFirstSlot(t.network.GenesisEpoch) - if curSlot < genSlot { - if t.network.Beacon.IsFirstSlotOfEpoch(curSlot) { - // wait until genesis epoch starts - curEpoch := t.network.Beacon.EstimatedCurrentEpoch() - gnsTime := t.network.Beacon.GetSlotStartTime(genSlot) - logger.Info("duties paused, will resume duties on genesis epoch", - zap.Uint64("genesis_epoch", uint64(t.network.GenesisEpoch)), - zap.Uint64("current_epoch", uint64(curEpoch)), - zap.String("genesis_time", gnsTime.Format(time.UnixDate))) - } - return false - } - - return true -} diff --git a/operator/slotticker/mocks/slotticker.go b/operator/slotticker/mocks/slotticker.go new file mode 100644 index 0000000000..f8e56df5b1 --- /dev/null +++ b/operator/slotticker/mocks/slotticker.go @@ -0,0 +1,115 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./slotticker.go + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + reflect "reflect" + time "time" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + gomock "github.com/golang/mock/gomock" +) + +// MockSlotTicker is a mock of SlotTicker interface. +type MockSlotTicker struct { + ctrl *gomock.Controller + recorder *MockSlotTickerMockRecorder +} + +// MockSlotTickerMockRecorder is the mock recorder for MockSlotTicker. +type MockSlotTickerMockRecorder struct { + mock *MockSlotTicker +} + +// NewMockSlotTicker creates a new mock instance. +func NewMockSlotTicker(ctrl *gomock.Controller) *MockSlotTicker { + mock := &MockSlotTicker{ctrl: ctrl} + mock.recorder = &MockSlotTickerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { + return m.recorder +} + +// Next mocks base method. +func (m *MockSlotTicker) Next() <-chan time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(<-chan time.Time) + return ret0 +} + +// Next indicates an expected call of Next. +func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) +} + +// Slot mocks base method. +func (m *MockSlotTicker) Slot() phase0.Slot { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Slot") + ret0, _ := ret[0].(phase0.Slot) + return ret0 +} + +// Slot indicates an expected call of Slot. +func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) +} + +// MockConfigProvider is a mock of ConfigProvider interface. +type MockConfigProvider struct { + ctrl *gomock.Controller + recorder *MockConfigProviderMockRecorder +} + +// MockConfigProviderMockRecorder is the mock recorder for MockConfigProvider. +type MockConfigProviderMockRecorder struct { + mock *MockConfigProvider +} + +// NewMockConfigProvider creates a new mock instance. +func NewMockConfigProvider(ctrl *gomock.Controller) *MockConfigProvider { + mock := &MockConfigProvider{ctrl: ctrl} + mock.recorder = &MockConfigProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockConfigProvider) EXPECT() *MockConfigProviderMockRecorder { + return m.recorder +} + +// GetGenesisTime mocks base method. +func (m *MockConfigProvider) GetGenesisTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGenesisTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetGenesisTime indicates an expected call of GetGenesisTime. +func (mr *MockConfigProviderMockRecorder) GetGenesisTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGenesisTime", reflect.TypeOf((*MockConfigProvider)(nil).GetGenesisTime)) +} + +// SlotDurationSec mocks base method. +func (m *MockConfigProvider) SlotDurationSec() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SlotDurationSec") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// SlotDurationSec indicates an expected call of SlotDurationSec. 
+func (mr *MockConfigProviderMockRecorder) SlotDurationSec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SlotDurationSec", reflect.TypeOf((*MockConfigProvider)(nil).SlotDurationSec)) +} diff --git a/operator/slotticker/slotticker.go b/operator/slotticker/slotticker.go new file mode 100644 index 0000000000..74e6511092 --- /dev/null +++ b/operator/slotticker/slotticker.go @@ -0,0 +1,96 @@ +package slotticker + +import ( + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +//go:generate mockgen -package=mocks -destination=./mocks/slotticker.go -source=./slotticker.go + +type Provider func() SlotTicker + +type SlotTicker interface { + Next() <-chan time.Time + Slot() phase0.Slot +} + +type ConfigProvider interface { + SlotDurationSec() time.Duration + GetGenesisTime() time.Time +} + +type Config struct { + slotDuration time.Duration + genesisTime time.Time +} + +func (cfg Config) SlotDurationSec() time.Duration { + return cfg.slotDuration +} + +func (cfg Config) GetGenesisTime() time.Time { + return cfg.genesisTime +} + +type slotTicker struct { + timer *time.Timer + slotDuration time.Duration + genesisTime time.Time + slot phase0.Slot +} + +// New returns a goroutine-free SlotTicker implementation which is not thread-safe. +func New(cfgProvider ConfigProvider) *slotTicker { + genesisTime := cfgProvider.GetGenesisTime() + slotDuration := cfgProvider.SlotDurationSec() + + now := time.Now() + timeSinceGenesis := now.Sub(genesisTime) + + var initialDelay time.Duration + if timeSinceGenesis < 0 { + // Genesis time is in the future + initialDelay = -timeSinceGenesis // Wait until the genesis time + } else { + slotsSinceGenesis := timeSinceGenesis / slotDuration + nextSlotStartTime := genesisTime.Add((slotsSinceGenesis + 1) * slotDuration) + initialDelay = time.Until(nextSlotStartTime) + } + + return &slotTicker{ + timer: time.NewTimer(initialDelay), + slotDuration: slotDuration, + genesisTime: genesisTime, + slot: 0, + } +} + +// Next returns a channel that signals when the next slot should start. +// Note: This function is not thread-safe and should be called in a serialized fashion. +// Make sure no concurrent calls happen, as it can result in unexpected behavior. +func (s *slotTicker) Next() <-chan time.Time { + timeSinceGenesis := time.Since(s.genesisTime) + if timeSinceGenesis < 0 { + return s.timer.C + } + if !s.timer.Stop() { + // try to drain the channel, but don't block if there's no value + select { + case <-s.timer.C: + default: + } + } + slotNumber := uint64(timeSinceGenesis / s.slotDuration) + nextSlotStartTime := s.genesisTime.Add(time.Duration(slotNumber+1) * s.slotDuration) + s.timer.Reset(time.Until(nextSlotStartTime)) + s.slot = phase0.Slot(slotNumber + 1) + return s.timer.C +} + +// Slot returns the current slot number. +// Note: Like the Next function, this method is also not thread-safe. +// It should be called in a serialized manner after calling Next. 
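+//
+// A minimal consumer loop might look like this (illustrative sketch;
+// handleSlot is a hypothetical callback, not part of this package):
+//
+//	ticker := New(cfg)
+//	for {
+//		<-ticker.Next()
+//		handleSlot(ticker.Slot())
+//	}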
+func (s *slotTicker) Slot() phase0.Slot {
+	return s.slot
+}
diff --git a/operator/slotticker/slotticker_test.go b/operator/slotticker/slotticker_test.go
new file mode 100644
index 0000000000..612e61d492
--- /dev/null
+++ b/operator/slotticker/slotticker_test.go
@@ -0,0 +1,179 @@
+package slotticker
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSlotTicker(t *testing.T) {
+	const numTicks = 3
+	slotDuration := 200 * time.Millisecond
+	// Set the genesis time such that we start from slot 1
+	genesisTime := time.Now().Truncate(slotDuration).Add(-slotDuration)
+
+	// Calculate the expected starting slot based on genesisTime
+	timeSinceGenesis := time.Since(genesisTime)
+	expectedSlot := phase0.Slot(timeSinceGenesis/slotDuration) + 1
+
+	ticker := New(Config{slotDuration, genesisTime})
+
+	for i := 0; i < numTicks; i++ {
+		<-ticker.Next()
+		slot := ticker.Slot()
+
+		require.Equal(t, expectedSlot, slot)
+		expectedSlot++
+	}
+}
+
+func TestTickerInitialization(t *testing.T) {
+	slotDuration := 200 * time.Millisecond
+	genesisTime := time.Now()
+	ticker := New(Config{slotDuration, genesisTime})
+
+	start := time.Now()
+	<-ticker.Next()
+	slot := ticker.Slot()
+
+	// Allow a small buffer (e.g., 10ms) due to code execution overhead
+	buffer := 10 * time.Millisecond
+
+	elapsed := time.Since(start)
+	assert.True(t, elapsed+buffer >= slotDuration, "First tick occurred too soon: %v", elapsed.String())
+	require.Equal(t, phase0.Slot(1), slot)
+}
+
+func TestSlotNumberConsistency(t *testing.T) {
+	slotDuration := 200 * time.Millisecond
+	genesisTime := time.Now()
+
+	ticker := New(Config{slotDuration, genesisTime})
+	var lastSlot phase0.Slot
+
+	for i := 0; i < 10; i++ {
+		<-ticker.Next()
+		slot := ticker.Slot()
+
+		require.Equal(t, lastSlot+1, slot)
+		lastSlot = slot
+	}
+}
+
+func TestGenesisInFuture(t *testing.T) {
+	slotDuration := 200 * time.Millisecond
+	genesisTime := time.Now().Add(1 * time.Second) // Setting genesis time 1s in the future
+
+	ticker := New(Config{slotDuration, genesisTime})
+	start := time.Now()
+
+	<-ticker.Next()
+
+	// The first tick should occur after the genesis time
+	expectedFirstTickDuration := genesisTime.Sub(start)
+	actualFirstTickDuration := time.Since(start)
+
+	// Allow a small buffer (e.g., 10ms) due to code execution overhead
+	buffer := 10 * time.Millisecond
+
+	assert.True(t, actualFirstTickDuration+buffer >= expectedFirstTickDuration, "First tick occurred too soon. Expected at least: %v, but got: %v", expectedFirstTickDuration.String(), actualFirstTickDuration.String())
+}
+
+func TestBoundedDrift(t *testing.T) {
+	slotDuration := 20 * time.Millisecond
+	genesisTime := time.Now()
+
+	ticker := New(Config{slotDuration, genesisTime})
+	ticks := 100
+
+	start := time.Now()
+	for i := 0; i < ticks; i++ {
+		<-ticker.Next()
+	}
+	expectedDuration := time.Duration(ticks) * slotDuration
+	elapsed := time.Since(start)
+
+	// We'll allow a small buffer for drift, say 1%
+	buffer := expectedDuration * 1 / 100
+	assert.True(t, elapsed >= expectedDuration-buffer && elapsed <= expectedDuration+buffer, "Drifted too far from expected time. 
Expected: %v, Actual: %v", expectedDuration.String(), elapsed.String()) +} + +func TestMultipleSlotTickers(t *testing.T) { + const ( + numTickers = 1000 + ticksPerTimer = 3 + ) + + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + + // Start the clock to time the full execution of all tickers + start := time.Now() + + var wg sync.WaitGroup + wg.Add(numTickers) + + for i := 0; i < numTickers; i++ { + go func() { + defer wg.Done() + ticker := New(Config{slotDuration, genesisTime}) + for j := 0; j < ticksPerTimer; j++ { + <-ticker.Next() + } + }() + } + + wg.Wait() + + // Calculate the total time taken for all tickers to complete their ticks + elapsed := time.Since(start) + expectedDuration := slotDuration * ticksPerTimer + + // We'll allow a small buffer for drift, say 1% + buffer := expectedDuration * 1 / 100 + assert.True(t, elapsed <= expectedDuration+buffer, "Expected all tickers to complete within %v but took %v", expectedDuration.String(), elapsed.String()) +} + +func TestSlotSkipping(t *testing.T) { + const ( + numTicks = 100 + skipInterval = 10 // Introduce a delay every 10 ticks + slotDuration = 20 * time.Millisecond + ) + + genesisTime := time.Now() + ticker := New(Config{slotDuration, genesisTime}) + + var lastSlot phase0.Slot + for i := 1; i <= numTicks; i++ { // Starting loop from 1 for ease of skipInterval check + select { + case <-ticker.Next(): + slot := ticker.Slot() + + // Ensure we never receive slots out of order or repeatedly + require.Equal(t, slot, lastSlot+1, "Expected slot %d to be one more than the last slot %d", slot, lastSlot) + lastSlot = slot + + // If it's the 10th tick or any multiple thereof + if i%skipInterval == 0 { + // Introduce delay to skip a slot + time.Sleep(slotDuration) + + // Ensure the next slot we receive is exactly 2 slots ahead of the previous slot + <-ticker.Next() + slotAfterDelay := ticker.Slot() + require.Equal(t, lastSlot+2, slotAfterDelay, "Expected to skip a slot after introducing a delay") + + // Update the slot variable to use this new slot for further iterations + lastSlot = slotAfterDelay + } + + case <-time.After(2 * slotDuration): // Fail if we don't get a tick within a reasonable time + t.Fatalf("Did not receive expected tick for iteration %d", i) + } + } +} From b93b70cdb89bf9f59055c345997b17a8d0c0d2d5 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Tue, 24 Oct 2023 12:48:01 +0300 Subject: [PATCH 16/54] Align to `ssv-spec` v0.3.3 (#1109) * update ssv-spec to #drop-blinded-block-rejection * refactors * update spec JSONs * update ssv-spec to main branch * generate mocks * spec alignments * refactors: remove syncing methods * lint fixes * more alignments & differ approvals * ignore voluntary exit tests * log skipped tests * state comparison wip * update spec to 0.3.3 * go mod tidy * qbft spectest fixes * undo temporary test * more spec test fixes * fix more spec tests * fixes for new linter version * approve spec diffs * removed unused method * remove unused func --- beacon/goclient/goclient.go | 1 + beacon/goclient/voluntary_exit.go | 10 + eth/executionclient/execution_client_test.go | 14 +- go.mod | 4 +- go.sum | 4 +- integration/qbft/tests/scenario_test.go | 12 - network/p2p/p2p.go | 7 - network/p2p/p2p_sync.go | 123 +--------- network/p2p/p2p_test.go | 31 ++- network/syncing/concurrent.go | 189 -------------- network/syncing/concurrent_test.go | 148 ----------- network/syncing/mocks/syncer.go | 127 ---------- network/syncing/syncer.go | 231 ----------------- 
network/syncing/syncer_test.go | 35 --- operator/duties/attester.go | 3 +- operator/duties/base_handler.go | 5 + operator/duties/mocks/scheduler.go | 28 +-- operator/duties/proposer.go | 3 +- operator/duties/sync_committee.go | 3 +- operator/validator/controller.go | 38 +-- operator/validator/mocks/controller.go | 28 +-- protocol/v2/blockchain/beacon/mock_client.go | 14 ++ protocol/v2/p2p/network.go | 19 +- protocol/v2/qbft/controller/controller.go | 35 ++- protocol/v2/qbft/controller/decided.go | 2 - protocol/v2/qbft/controller/future_msg.go | 77 ------ protocol/v2/qbft/instance/instance.go | 10 +- protocol/v2/qbft/instance/marshalutils.go | 47 ++++ .../v2/qbft/spectest/controller_sync_type.go | 55 ----- protocol/v2/qbft/spectest/controller_type.go | 71 ++++-- .../v2/qbft/spectest/msg_processing_type.go | 27 +- .../v2/qbft/spectest/qbft_mapping_test.go | 11 - protocol/v2/ssv/runner/runner.go | 11 + .../v2/ssv/runner/validator_registration.go | 8 +- protocol/v2/ssv/runner/voluntary_exit.go | 232 ++++++++++++++++++ .../v2/ssv/spectest/msg_processing_type.go | 51 ++++ .../ssv/spectest/multi_msg_processing_type.go | 29 ++- .../multi_start_new_runner_duty_type.go | 70 +++++- protocol/v2/ssv/spectest/ssv_mapping_test.go | 7 +- protocol/v2/ssv/testing/runner.go | 16 +- protocol/v2/ssv/validator/startup.go | 27 -- protocol/v2/sync/handlers/decided_history.go | 57 ----- protocol/v2/sync/handlers/last_decided.go | 53 ---- protocol/v2/testing/test_utils.go | 27 +- scripts/spec-alignment/differ.config.yaml | 2 +- utils/rsaencryption/testingspace/vars.go | 1 + 46 files changed, 688 insertions(+), 1315 deletions(-) create mode 100644 beacon/goclient/voluntary_exit.go delete mode 100644 network/syncing/concurrent.go delete mode 100644 network/syncing/concurrent_test.go delete mode 100644 network/syncing/mocks/syncer.go delete mode 100644 network/syncing/syncer.go delete mode 100644 network/syncing/syncer_test.go delete mode 100644 protocol/v2/qbft/controller/future_msg.go create mode 100644 protocol/v2/qbft/instance/marshalutils.go delete mode 100644 protocol/v2/qbft/spectest/controller_sync_type.go create mode 100644 protocol/v2/ssv/runner/voluntary_exit.go delete mode 100644 protocol/v2/sync/handlers/decided_history.go delete mode 100644 protocol/v2/sync/handlers/last_decided.go diff --git a/beacon/goclient/goclient.go b/beacon/goclient/goclient.go index 8448ba70ff..de3ed18c0d 100644 --- a/beacon/goclient/goclient.go +++ b/beacon/goclient/goclient.go @@ -122,6 +122,7 @@ type Client interface { eth2client.BlindedBeaconBlockProposalProvider eth2client.BlindedBeaconBlockSubmitter eth2client.ValidatorRegistrationsSubmitter + eth2client.VoluntaryExitSubmitter } type NodeClientProvider interface { diff --git a/beacon/goclient/voluntary_exit.go b/beacon/goclient/voluntary_exit.go new file mode 100644 index 0000000000..bb2dfaa62f --- /dev/null +++ b/beacon/goclient/voluntary_exit.go @@ -0,0 +1,10 @@ +package goclient + +import ( + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/pkg/errors" +) + +func (gc *goClient) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit, sig phase0.BLSSignature) error { + return errors.New("not implemented") +} diff --git a/eth/executionclient/execution_client_test.go b/eth/executionclient/execution_client_test.go index 823515c52b..4fed0795c3 100644 --- a/eth/executionclient/execution_client_test.go +++ b/eth/executionclient/execution_client_test.go @@ -67,7 +67,7 @@ func TestFetchHistoricalLogs(t *testing.T) { httpsrv := 
httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) parsed, _ := abi.JSON(strings.NewReader(callableAbi)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) @@ -131,7 +131,7 @@ func TestStreamLogs(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -215,7 +215,7 @@ func TestFetchLogsInBatches(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -325,7 +325,7 @@ func TestChainReorganizationLogs(t *testing.T) { // defer rpcServer.Stop() // defer httpsrv.Close() - // addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + // addr := httpToWebSocketURL(httpsrv.URL) // // 1. // parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -417,7 +417,7 @@ func TestSimSSV(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) parsed, _ := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) @@ -584,3 +584,7 @@ func TestSimSSV(t *testing.T) { require.NoError(t, client.Close()) require.NoError(t, sim.Close()) } + +func httpToWebSocketURL(url string) string { + return "ws:" + strings.TrimPrefix(url, "http:") +} diff --git a/go.mod b/go.mod index eebec2fb8a..be8456527b 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/aquasecurity/table v1.8.0 github.com/attestantio/go-eth2-client v0.16.3 github.com/bloxapp/eth2-key-manager v1.3.1 - github.com/bloxapp/ssv-spec v0.3.1 + github.com/bloxapp/ssv-spec v0.3.3 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/xxhash/v2 v2.2.0 github.com/cornelk/hashmap v1.0.8 @@ -222,5 +222,3 @@ require ( replace github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 replace github.com/dgraph-io/ristretto => github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f - -replace github.com/bloxapp/ssv-spec => github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800 diff --git a/go.sum b/go.sum index 7b8753260f..5a76a37965 100644 --- a/go.sum +++ b/go.sum @@ -56,8 +56,8 @@ github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHl github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bloxapp/eth2-key-manager v1.3.1 h1:1olQcOHRY2TN1o8JX9AN1siEIJXWnlM+BlknfBbXoo4= github.com/bloxapp/eth2-key-manager v1.3.1/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= -github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800 h1:ikChvdYVw4GFSlnIS+u1qmNqOvgq2a2H3b2FZ44KBn8= -github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= +github.com/bloxapp/ssv-spec v0.3.3 h1:iNomqWQjxDDQouHMjl27PmH1hUolJ4u8QQ+HX/TQQcg= +github.com/bloxapp/ssv-spec 
v0.3.3/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= diff --git a/integration/qbft/tests/scenario_test.go b/integration/qbft/tests/scenario_test.go index 55736dd419..e803fd9616 100644 --- a/integration/qbft/tests/scenario_test.go +++ b/integration/qbft/tests/scenario_test.go @@ -2,7 +2,6 @@ package tests import ( "context" - "fmt" "testing" "time" @@ -21,11 +20,9 @@ import ( "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/validator" protocolbeacon "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/sync/handlers" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" @@ -63,15 +60,6 @@ func (s *Scenario) Run(t *testing.T, role spectypes.BeaconRole) { for id := 1; id <= s.Committee; id++ { id := spectypes.OperatorID(id) s.validators[id] = createValidator(t, ctx, id, getKeySet(s.Committee), logger, s.shared.Nodes[id]) - - stores := newStores(logger) - s.shared.Nodes[id].RegisterHandlers(logger, protocolp2p.WithHandler( - protocolp2p.LastDecidedProtocol, - handlers.LastDecidedHandler(logger.Named(fmt.Sprintf("decided-handler-%d", id)), stores, s.shared.Nodes[id]), - ), protocolp2p.WithHandler( - protocolp2p.DecidedHistoryProtocol, - handlers.HistoryHandler(logger.Named(fmt.Sprintf("history-handler-%d", id)), stores, s.shared.Nodes[id], 25), - )) } //invoking duties diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index e665e41143..768d583042 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -23,7 +23,6 @@ import ( "github.com/bloxapp/ssv/network/peers/connections" "github.com/bloxapp/ssv/network/records" "github.com/bloxapp/ssv/network/streams" - "github.com/bloxapp/ssv/network/syncing" "github.com/bloxapp/ssv/network/topics" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/utils/async" @@ -72,7 +71,6 @@ type p2pNetwork struct { backoffConnector *libp2pdiscbackoff.BackoffConnector subnets []byte libConnManager connmgrcore.ConnManager - syncer syncing.Syncer nodeStorage operatorstorage.Storage operatorPKCache sync.Map } @@ -172,11 +170,6 @@ func (n *p2pNetwork) Start(logger *zap.Logger) error { return err } - // Create & start ConcurrentSyncer. 
- syncer := syncing.NewConcurrent(n.ctx, syncing.New(n, n.msgValidator), 16, syncing.DefaultTimeouts, nil) - go syncer.Run(logger) - n.syncer = syncer - return nil } diff --git a/network/p2p/p2p_sync.go b/network/p2p/p2p_sync.go index a43e199615..74ac3a4e14 100644 --- a/network/p2p/p2p_sync.go +++ b/network/p2p/p2p_sync.go @@ -1,14 +1,11 @@ package p2pv1 import ( - "context" "encoding/hex" "fmt" "math/rand" "time" - "github.com/bloxapp/ssv-spec/qbft" - specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -21,126 +18,8 @@ import ( "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) -func (n *p2pNetwork) SyncHighestDecided(mid spectypes.MessageID) error { - ctx := context.TODO() // TODO: pass context to SyncHighestDecided - - return n.syncer.SyncHighestDecided(ctx, n.interfaceLogger, mid, func(msg *queue.DecodedSSVMessage) { - n.msgRouter.Route(ctx, msg) - }) -} - -func (n *p2pNetwork) SyncDecidedByRange(mid spectypes.MessageID, from, to qbft.Height) { - ctx := context.TODO() // TODO: pass context to SyncDecidedByRange - - if !n.cfg.FullNode { - return - } - // TODO: uncomment to fix syncing bug! - // if from < to { - // n.logger.Warn("failed to sync decided by range: from is greater than to", - // zap.String("pubkey", hex.EncodeToString(mid.GetPubKey())), - // zap.String("role", mid.GetRoleType().String()), - // zap.Uint64("from", uint64(from)), - // zap.Uint64("to", uint64(to))) - // return - // } - if to > from { - n.interfaceLogger.Warn("failed to sync decided by range: to is higher than from", - zap.Uint64("from", uint64(from)), - zap.Uint64("to", uint64(to))) - return - } - - // TODO: this is a temporary solution to prevent syncing already decided heights. - // Example: Say we received a decided at height 99, and right after we received a decided at height 100 - // before we could advance the controller's height. This would cause the controller to call SyncDecidedByRange. - // However, height 99 is already synced, so temporarily we reject such requests here. - // Note: This isn't ideal because sometimes you do want to sync gaps of 1. 
- const minGap = 2 - if to-from < minGap { - return - } - - err := n.syncer.SyncDecidedByRange(ctx, n.interfaceLogger, mid, from, to, func(msg *queue.DecodedSSVMessage) { - n.msgRouter.Route(ctx, msg) - }) - if err != nil { - n.interfaceLogger.Error("failed to sync decided by range", zap.Error(err)) - } -} - -// LastDecided fetches last decided from a random set of peers -func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]p2pprotocol.SyncResult, error) { - const ( - minPeers = 3 - waitTime = time.Second * 24 - ) - if !n.isReady() { - return nil, p2pprotocol.ErrNetworkIsNotReady - } - pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) - peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) - if err != nil { - return nil, errors.Wrap(err, "could not get subset of peers") - } - return n.makeSyncRequest(logger, peers, mid, pid, &message.SyncMessage{ - Params: &message.SyncParams{ - Identifier: mid, - }, - Protocol: message.LastDecidedType, - }) -} - -// GetHistory sync the given range from a set of peers that supports history for the given identifier -func (n *p2pNetwork) GetHistory(logger *zap.Logger, mid spectypes.MessageID, from, to specqbft.Height, targets ...string) ([]p2pprotocol.SyncResult, specqbft.Height, error) { - if from >= to { - return nil, 0, nil - } - - if !n.isReady() { - return nil, 0, p2pprotocol.ErrNetworkIsNotReady - } - protocolID, peerCount := commons.ProtocolID(p2pprotocol.DecidedHistoryProtocol) - peers := make([]peer.ID, 0) - for _, t := range targets { - p, err := peer.Decode(t) - if err != nil { - continue - } - peers = append(peers, p) - } - // if no peers were provided -> select a random set of peers - if len(peers) == 0 { - random, err := n.getSubsetOfPeers(logger, mid.GetPubKey(), peerCount, n.peersWithProtocolsFilter(protocolID)) - if err != nil { - return nil, 0, errors.Wrap(err, "could not get subset of peers") - } - peers = random - } - maxBatchRes := specqbft.Height(n.cfg.MaxBatchResponse) - - var results []p2pprotocol.SyncResult - var err error - currentEnd := to - if to-from > maxBatchRes { - currentEnd = from + maxBatchRes - } - results, err = n.makeSyncRequest(logger, peers, mid, protocolID, &message.SyncMessage{ - Params: &message.SyncParams{ - Height: []specqbft.Height{from, currentEnd}, - Identifier: mid, - }, - Protocol: message.DecidedHistoryType, - }) - if err != nil { - return results, 0, err - } - return results, currentEnd, nil -} - // RegisterHandlers registers the given handlers func (n *p2pNetwork) RegisterHandlers(logger *zap.Logger, handlers ...*p2pprotocol.SyncHandler) { m := make(map[libp2p_protocol.ID][]p2pprotocol.RequestHandler) @@ -277,6 +156,8 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp } // peersWithProtocolsFilter is used to accept peers that supports the given protocols +// +//nolint:unused func (n *p2pNetwork) peersWithProtocolsFilter(protocols ...libp2p_protocol.ID) func(peer.ID) bool { return func(id peer.ID) bool { supported, err := n.host.Network().Peerstore().SupportsProtocols(id, protocols...) 
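With the syncer layer removed, peers still serve last-decided requests through the RegisterHandlers API retained above. A minimal registration might look like the following sketch, mirroring the test helper in this patch; the response construction is illustrative only (a real handler would look up the highest decided message for msg.MsgID from storage):

	node.RegisterHandlers(logger, &p2pprotocol.SyncHandler{
		Protocol: p2pprotocol.LastDecidedProtocol,
		Handler: func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) {
			// Echo the request back to the requesting peer
			// (storage lookup omitted in this sketch).
			return msg, nil
		},
	})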
diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index 4aace4bc40..d2152c049e 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -9,7 +9,9 @@ import ( "time" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" specqbft "github.com/bloxapp/ssv-spec/qbft" @@ -20,7 +22,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/network" - protcolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" + p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" ) func TestGetMaxPeers(t *testing.T) { @@ -141,7 +143,7 @@ func TestP2pNetwork_Stream(t *testing.T) { <-time.After(time.Second) node := ln.Nodes[0] - res, err := node.LastDecided(logger, mid) + res, err := node.(*p2pNetwork).LastDecided(logger, mid) require.NoError(t, err) select { case err := <-errors: @@ -206,9 +208,30 @@ func TestWaitSubsetOfPeers(t *testing.T) { } } +func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]p2pprotocol.SyncResult, error) { + const ( + minPeers = 3 + waitTime = time.Second * 24 + ) + if !n.isReady() { + return nil, p2pprotocol.ErrNetworkIsNotReady + } + pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) + peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) + if err != nil { + return nil, errors.Wrap(err, "could not get subset of peers") + } + return n.makeSyncRequest(logger, peers, mid, pid, &message.SyncMessage{ + Params: &message.SyncParams{ + Identifier: mid, + }, + Protocol: message.LastDecidedType, + }) +} + func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes.MessageID, height specqbft.Height, round specqbft.Round, counter *int64, errors chan<- error) { - node.RegisterHandlers(logger, &protcolp2p.SyncHandler{ - Protocol: protcolp2p.LastDecidedProtocol, + node.RegisterHandlers(logger, &p2pprotocol.SyncHandler{ + Protocol: p2pprotocol.LastDecidedProtocol, Handler: func(message *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { atomic.AddInt64(counter, 1) sm := specqbft.SignedMessage{ diff --git a/network/syncing/concurrent.go b/network/syncing/concurrent.go deleted file mode 100644 index d3ddcd2ec1..0000000000 --- a/network/syncing/concurrent.go +++ /dev/null @@ -1,189 +0,0 @@ -package syncing - -import ( - "context" - "fmt" - "sync" - "time" - - "go.uber.org/zap" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" -) - -// Error describes an error that occurred during a syncing operation. -type Error struct { - Operation Operation - Err error -} - -func (e Error) Error() string { - return fmt.Sprintf("%s: %v", e.Operation, e.Err) -} - -// Timeouts is a set of timeouts for each syncing operation. -type Timeouts struct { - // SyncHighestDecided is the timeout for SyncHighestDecided. - // Leave zero to not timeout. - SyncHighestDecided time.Duration - - // SyncDecidedByRange is the timeout for SyncDecidedByRange. - // Leave zero to not timeout. - SyncDecidedByRange time.Duration -} - -var DefaultTimeouts = Timeouts{ - SyncHighestDecided: 12 * time.Second, - SyncDecidedByRange: 30 * time.Minute, -} - -// Operation is a syncing operation that has been queued for execution. 
-type Operation interface { - run(context.Context, *zap.Logger, Syncer) error - timeout(Timeouts) time.Duration -} - -type OperationSyncHighestDecided struct { - ID spectypes.MessageID - Handler MessageHandler -} - -func (o OperationSyncHighestDecided) run(ctx context.Context, logger *zap.Logger, s Syncer) error { - return s.SyncHighestDecided(ctx, logger, o.ID, o.Handler) -} - -func (o OperationSyncHighestDecided) timeout(t Timeouts) time.Duration { - return t.SyncHighestDecided -} - -func (o OperationSyncHighestDecided) String() string { - return fmt.Sprintf("SyncHighestDecided(%s)", o.ID) -} - -type OperationSyncDecidedByRange struct { - ID spectypes.MessageID - From specqbft.Height - To specqbft.Height - Handler MessageHandler -} - -func (o OperationSyncDecidedByRange) run(ctx context.Context, logger *zap.Logger, s Syncer) error { - return s.SyncDecidedByRange(ctx, logger, o.ID, o.From, o.To, o.Handler) -} - -func (o OperationSyncDecidedByRange) timeout(t Timeouts) time.Duration { - return t.SyncDecidedByRange -} - -func (o OperationSyncDecidedByRange) String() string { - return fmt.Sprintf("SyncDecidedByRange(%s, %d, %d)", o.ID, o.From, o.To) -} - -// ConcurrentSyncer is a Syncer that runs the given Syncer's methods concurrently. -type ConcurrentSyncer struct { - syncer Syncer - ctx context.Context - jobs chan Operation - errors chan<- Error - concurrency int - timeouts Timeouts -} - -// NewConcurrent returns a new Syncer that runs the given Syncer's methods concurrently. -// Unlike the standard syncer, syncing methods are non-blocking and return immediately without error. -// concurrency is the number of worker goroutines to spawn. -// errors is a channel to which any errors are sent. Pass nil to discard errors. -func NewConcurrent( - ctx context.Context, - syncer Syncer, - concurrency int, - timeouts Timeouts, - errors chan<- Error, -) *ConcurrentSyncer { - return &ConcurrentSyncer{ - syncer: syncer, - ctx: ctx, - // TODO: make the buffer size configurable or better-yet unbounded? - jobs: make(chan Operation, 128*1024), - errors: errors, - concurrency: concurrency, - timeouts: timeouts, - } -} - -// Run starts the worker goroutines and blocks until the context is done -// and any remaining jobs are finished. -func (s *ConcurrentSyncer) Run(logger *zap.Logger) { - // Spawn worker goroutines. - var wg sync.WaitGroup - for i := 0; i < s.concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for job := range s.jobs { - s.do(logger, job) - } - }() - } - - // Close the jobs channel when the context is done. - <-s.ctx.Done() - close(s.jobs) - - // Wait for workers to finish their current jobs. - wg.Wait() -} - -func (s *ConcurrentSyncer) do(logger *zap.Logger, job Operation) { - ctx, cancel := context.WithTimeout(s.ctx, job.timeout(s.timeouts)) - defer cancel() - err := job.run(ctx, logger, s.syncer) - if err != nil && s.errors != nil { - s.errors <- Error{ - Operation: job, - Err: err, - } - } -} - -// Queued returns the number of jobs that are queued but not yet started. -func (s *ConcurrentSyncer) Queued() int { - return len(s.jobs) -} - -// Capacity returns the maximum number of jobs that can be queued. -// When Queued() == Capacity(), then the next call will block -// until a job is finished. 
-func (s *ConcurrentSyncer) Capacity() int { - return cap(s.jobs) -} - -func (s *ConcurrentSyncer) SyncHighestDecided( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - handler MessageHandler, -) error { - s.jobs <- OperationSyncHighestDecided{ - ID: id, - Handler: handler, - } - return nil -} - -func (s *ConcurrentSyncer) SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, -) error { - s.jobs <- OperationSyncDecidedByRange{ - ID: id, - From: from, - To: to, - Handler: handler, - } - return nil -} diff --git a/network/syncing/concurrent_test.go b/network/syncing/concurrent_test.go deleted file mode 100644 index ace426f6a2..0000000000 --- a/network/syncing/concurrent_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package syncing_test - -import ( - "context" - "fmt" - "runtime" - "testing" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/network/syncing" - "github.com/bloxapp/ssv/network/syncing/mocks" -) - -func TestConcurrentSyncer(t *testing.T) { - logger := logging.TestLogger(t) - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Test setup - syncer := mocks.NewMockSyncer(ctrl) - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - syncer.EXPECT().SyncHighestDecided(gomock.Any(), gomock.Any(), id, gomock.Any()).Return(nil) - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - syncer.EXPECT().SyncDecidedByRange(gomock.Any(), gomock.Any(), id, from, to, gomock.Any()).Return(nil) - require.NoError(t, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - - // Test error handling - syncer.EXPECT().SyncHighestDecided(gomock.Any(), gomock.Any(), id, gomock.Any()).Return(fmt.Errorf("test error")) - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Wait for the syncer to finish - cancel() - - // Verify errors. 
- select { - case err := <-errors: - require.IsType(t, syncing.OperationSyncHighestDecided{}, err.Operation) - require.Equal(t, id, err.Operation.(syncing.OperationSyncHighestDecided).ID) - require.Equal(t, "test error", err.Err.Error()) - case <-done: - t.Fatal("error channel should have received an error") - } - <-done -} - -func TestConcurrentSyncerMemoryUsage(t *testing.T) { - logger := logging.TestLogger(t) - - for i := 0; i < 4; i++ { - var before runtime.MemStats - runtime.ReadMemStats(&before) - - // Test setup - syncer := &mockSyncer{} - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - for i := 0; i < 1024*128; i++ { - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - require.NoError(t, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - } - - // Wait for the syncer to finish - cancel() - <-done - - var after runtime.MemStats - runtime.ReadMemStats(&after) - t.Logf("Allocated: %.2f MB", float64(after.TotalAlloc-before.TotalAlloc)/1024/1024) - } -} - -func BenchmarkConcurrentSyncer(b *testing.B) { - logger := logging.BenchLogger(b) - - for i := 0; i < b.N; i++ { - // Test setup - syncer := &mockSyncer{} - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - for i := 0; i < 1024*128; i++ { - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - require.NoError(b, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - require.NoError(b, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - } - - // Wait for the syncer to finish - cancel() - <-done - } -} diff --git a/network/syncing/mocks/syncer.go b/network/syncing/mocks/syncer.go deleted file mode 100644 index 1aa3a3d55d..0000000000 --- a/network/syncing/mocks/syncer.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./syncer.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - qbft "github.com/bloxapp/ssv-spec/qbft" - types "github.com/bloxapp/ssv-spec/types" - syncing "github.com/bloxapp/ssv/network/syncing" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - gomock "github.com/golang/mock/gomock" - zap "go.uber.org/zap" -) - -// MockSyncer is a mock of Syncer interface. -type MockSyncer struct { - ctrl *gomock.Controller - recorder *MockSyncerMockRecorder -} - -// MockSyncerMockRecorder is the mock recorder for MockSyncer. -type MockSyncerMockRecorder struct { - mock *MockSyncer -} - -// NewMockSyncer creates a new mock instance. 
-func NewMockSyncer(ctrl *gomock.Controller) *MockSyncer { - mock := &MockSyncer{ctrl: ctrl} - mock.recorder = &MockSyncerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSyncer) EXPECT() *MockSyncerMockRecorder { - return m.recorder -} - -// SyncDecidedByRange mocks base method. -func (m *MockSyncer) SyncDecidedByRange(ctx context.Context, logger *zap.Logger, id types.MessageID, from, to qbft.Height, handler syncing.MessageHandler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncDecidedByRange", ctx, logger, id, from, to, handler) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncDecidedByRange indicates an expected call of SyncDecidedByRange. -func (mr *MockSyncerMockRecorder) SyncDecidedByRange(ctx, logger, id, from, to, handler interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncDecidedByRange", reflect.TypeOf((*MockSyncer)(nil).SyncDecidedByRange), ctx, logger, id, from, to, handler) -} - -// SyncHighestDecided mocks base method. -func (m *MockSyncer) SyncHighestDecided(ctx context.Context, logger *zap.Logger, id types.MessageID, handler syncing.MessageHandler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncHighestDecided", ctx, logger, id, handler) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncHighestDecided indicates an expected call of SyncHighestDecided. -func (mr *MockSyncerMockRecorder) SyncHighestDecided(ctx, logger, id, handler interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncHighestDecided", reflect.TypeOf((*MockSyncer)(nil).SyncHighestDecided), ctx, logger, id, handler) -} - -// MockNetwork is a mock of Network interface. -type MockNetwork struct { - ctrl *gomock.Controller - recorder *MockNetworkMockRecorder -} - -// MockNetworkMockRecorder is the mock recorder for MockNetwork. -type MockNetworkMockRecorder struct { - mock *MockNetwork -} - -// NewMockNetwork creates a new mock instance. -func NewMockNetwork(ctrl *gomock.Controller) *MockNetwork { - mock := &MockNetwork{ctrl: ctrl} - mock.recorder = &MockNetworkMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { - return m.recorder -} - -// GetHistory mocks base method. -func (m *MockNetwork) GetHistory(logger *zap.Logger, id types.MessageID, from, to qbft.Height, targets ...string) ([]protocolp2p.SyncResult, qbft.Height, error) { - m.ctrl.T.Helper() - varargs := []interface{}{logger, id, from, to} - for _, a := range targets { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetHistory", varargs...) - ret0, _ := ret[0].([]protocolp2p.SyncResult) - ret1, _ := ret[1].(qbft.Height) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetHistory indicates an expected call of GetHistory. -func (mr *MockNetworkMockRecorder) GetHistory(logger, id, from, to interface{}, targets ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{logger, id, from, to}, targets...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistory", reflect.TypeOf((*MockNetwork)(nil).GetHistory), varargs...) -} - -// LastDecided mocks base method. 
-func (m *MockNetwork) LastDecided(logger *zap.Logger, id types.MessageID) ([]protocolp2p.SyncResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastDecided", logger, id) - ret0, _ := ret[0].([]protocolp2p.SyncResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LastDecided indicates an expected call of LastDecided. -func (mr *MockNetworkMockRecorder) LastDecided(logger, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastDecided", reflect.TypeOf((*MockNetwork)(nil).LastDecided), logger, id) -} diff --git a/network/syncing/syncer.go b/network/syncing/syncer.go deleted file mode 100644 index 0ac532e1ae..0000000000 --- a/network/syncing/syncer.go +++ /dev/null @@ -1,231 +0,0 @@ -package syncing - -import ( - "context" - "time" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/message/validation" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/protocol/v2/ssv/queue" - "github.com/bloxapp/ssv/utils/tasks" -) - -//go:generate mockgen -package=mocks -destination=./mocks/syncer.go -source=./syncer.go - -// MessageHandler reacts to a message received from Syncer. -type MessageHandler func(msg *queue.DecodedSSVMessage) - -// Syncer handles the syncing of decided messages. -type Syncer interface { - SyncHighestDecided(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, handler MessageHandler) error - SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, - ) error -} - -// Network is a subset of protocolp2p.Syncer, required by Syncer to retrieve messages from peers. -type Network interface { - LastDecided(logger *zap.Logger, id spectypes.MessageID) ([]protocolp2p.SyncResult, error) - GetHistory( - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - targets ...string, - ) ([]protocolp2p.SyncResult, specqbft.Height, error) -} - -type syncer struct { - network Network - msgValidator validation.MessageValidator -} - -// New returns a standard implementation of Syncer. 
-func New(network Network, msgValidator validation.MessageValidator) Syncer { - return &syncer{ - network: network, - msgValidator: msgValidator, - } -} - -func (s *syncer) SyncHighestDecided( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - handler MessageHandler, -) error { - if ctx.Err() != nil { - return ctx.Err() - } - - logger = logger.With( - zap.String("what", "SyncHighestDecided"), - fields.PubKey(id.GetPubKey()), - fields.Role(id.GetRoleType())) - - lastDecided, err := s.network.LastDecided(logger, id) - if err != nil { - logger.Debug("last decided sync failed", zap.Error(err)) - return errors.Wrap(err, "could not sync last decided") - } - if len(lastDecided) == 0 { - logger.Debug("no messages were synced") - return nil - } - - results := protocolp2p.SyncResults(lastDecided) - var maxHeight specqbft.Height - results.ForEachSignedMessage(func(m *specqbft.SignedMessage) (stop bool) { - if ctx.Err() != nil { - return true - } - if m.Message.Height > maxHeight { - maxHeight = m.Message.Height - } - raw, err := m.Encode() - if err != nil { - logger.Debug("could not encode signed message", zap.Error(err)) - return false - } - - ssvMessage := &spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: id, - Data: raw, - } - - decodedMsg, _, err := s.msgValidator.ValidateSSVMessage(ssvMessage) - if err != nil { - logger.Debug("could not validate ssv message", zap.Error(err)) - return false - } - - handler(decodedMsg) - - return false - }) - logger.Debug("synced last decided", zap.Uint64("highest_height", uint64(maxHeight)), zap.Int("messages", len(lastDecided))) - return nil -} - -func (s *syncer) SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, -) error { - if ctx.Err() != nil { - return ctx.Err() - } - - logger = logger.With( - zap.String("what", "SyncDecidedByRange"), - fields.PubKey(id.GetPubKey()), - fields.Role(id.GetRoleType()), - zap.Uint64("from", uint64(from)), - zap.Uint64("to", uint64(to))) - logger.Debug("syncing decided by range") - - err := s.getDecidedByRange( - context.Background(), - logger, - id, - from, - to, - func(sm *specqbft.SignedMessage) error { - raw, err := sm.Encode() - if err != nil { - logger.Debug("could not encode signed message", zap.Error(err)) - return nil - } - - ssvMessage := &spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: id, - Data: raw, - } - - decodedMsg, _, err := s.msgValidator.ValidateSSVMessage(ssvMessage) - if err != nil { - logger.Debug("could not validate ssv message", zap.Error(err)) - return nil - } - - handler(decodedMsg) - - return nil - }, - ) - if err != nil { - logger.Debug("sync failed", zap.Error(err)) - } - return err -} - -// getDecidedByRange calls GetHistory in batches to retrieve all decided messages in the given range. 
-func (s *syncer) getDecidedByRange( - ctx context.Context, - logger *zap.Logger, - mid spectypes.MessageID, - from, to specqbft.Height, - handler func(*specqbft.SignedMessage) error, -) error { - const maxRetries = 2 - - var ( - visited = make(map[specqbft.Height]struct{}) - msgs []protocolp2p.SyncResult - ) - - tail := from - var err error - for tail < to { - if ctx.Err() != nil { - return ctx.Err() - } - err := tasks.RetryWithContext(ctx, func() error { - start := time.Now() - msgs, tail, err = s.network.GetHistory(logger, mid, tail, to) - if err != nil { - return err - } - handled := 0 - protocolp2p.SyncResults(msgs).ForEachSignedMessage(func(m *specqbft.SignedMessage) (stop bool) { - if ctx.Err() != nil { - return true - } - if _, ok := visited[m.Message.Height]; ok { - return false - } - if err := handler(m); err != nil { - logger.Warn("could not handle signed message") - } - handled++ - visited[m.Message.Height] = struct{}{} - return false - }) - logger.Debug("received and processed history batch", - zap.Int64("tail", int64(tail)), - fields.Duration(start), - zap.Int("results_count", len(msgs)), - fields.SyncResults(msgs), - zap.Int("handled", handled)) - return nil - }, maxRetries) - if err != nil { - return err - } - } - - return nil -} diff --git a/network/syncing/syncer_test.go b/network/syncing/syncer_test.go deleted file mode 100644 index 3bd01c5486..0000000000 --- a/network/syncing/syncer_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package syncing_test - -import ( - "context" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/network/syncing" - "github.com/bloxapp/ssv/protocol/v2/ssv/queue" -) - -type mockSyncer struct{} - -func (m *mockSyncer) SyncHighestDecided(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, handler syncing.MessageHandler) error { - return nil -} - -func (m *mockSyncer) SyncDecidedByRange(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, from specqbft.Height, to specqbft.Height, handler syncing.MessageHandler) error { - return nil -} - -type mockMessageHandler struct { - calls int - handler syncing.MessageHandler -} - -func newMockMessageHandler() *mockMessageHandler { - m := &mockMessageHandler{} - m.handler = func(msg *queue.DecodedSSVMessage) { - m.calls++ - } - return m -} diff --git a/operator/duties/attester.go b/operator/duties/attester.go index 9f3c1c1283..f89cbaf867 100644 --- a/operator/duties/attester.go +++ b/operator/duties/attester.go @@ -249,8 +249,7 @@ func (h *AttesterHandler) shouldExecute(duty *eth2apiv1.AttesterDuty) bool { return true } if currentSlot+1 == duty.Slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/base_handler.go b/operator/duties/base_handler.go index 7e26de30b3..c3c22ebbe2 100644 --- a/operator/duties/base_handler.go +++ b/operator/duties/base_handler.go @@ -56,3 +56,8 @@ func (h *baseHandler) Setup( h.reorg = reorgEvents h.indicesChange = indicesChange } + +func (h *baseHandler) warnMisalignedSlotAndDuty(dutyType string) { + h.logger.Debug("current slot and duty slot are not aligned, "+ + "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", dutyType)) +} diff --git a/operator/duties/mocks/scheduler.go b/operator/duties/mocks/scheduler.go 
index e9e6c8026f..7195d58dcd 100644 --- a/operator/duties/mocks/scheduler.go +++ b/operator/duties/mocks/scheduler.go @@ -214,20 +214,6 @@ func (mr *MockValidatorControllerMockRecorder) AllActiveIndices(epoch interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).AllActiveIndices), epoch) } -// GetOperatorShares mocks base method. -func (m *MockValidatorController) GetOperatorShares() []*types.SSVShare { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOperatorShares") - ret0, _ := ret[0].([]*types.SSVShare) - return ret0 -} - -// GetOperatorShares indicates an expected call of GetOperatorShares. -func (mr *MockValidatorControllerMockRecorder) GetOperatorShares() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOperatorShares", reflect.TypeOf((*MockValidatorController)(nil).GetOperatorShares)) -} - // CommitteeActiveIndices mocks base method. func (m *MockValidatorController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() @@ -241,3 +227,17 @@ func (mr *MockValidatorControllerMockRecorder) CommitteeActiveIndices(epoch inte mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).CommitteeActiveIndices), epoch) } + +// GetOperatorShares mocks base method. +func (m *MockValidatorController) GetOperatorShares() []*types.SSVShare { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOperatorShares") + ret0, _ := ret[0].([]*types.SSVShare) + return ret0 +} + +// GetOperatorShares indicates an expected call of GetOperatorShares. +func (mr *MockValidatorControllerMockRecorder) GetOperatorShares() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOperatorShares", reflect.TypeOf((*MockValidatorController)(nil).GetOperatorShares)) +} diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go index 89e96cca94..d65b25b0e1 100644 --- a/operator/duties/proposer.go +++ b/operator/duties/proposer.go @@ -185,8 +185,7 @@ func (h *ProposerHandler) shouldExecute(duty *eth2apiv1.ProposerDuty) bool { return true } if currentSlot+1 == duty.Slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/sync_committee.go b/operator/duties/sync_committee.go index 1d99e930a0..03c2e60037 100644 --- a/operator/duties/sync_committee.go +++ b/operator/duties/sync_committee.go @@ -260,8 +260,7 @@ func (h *SyncCommitteeHandler) shouldExecute(duty *eth2apiv1.SyncCommitteeDuty, return true } if currentSlot+1 == slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 3048a654d6..1b45e627e1 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -37,7 +37,6 @@ import ( "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/sync/handlers" 
"github.com/bloxapp/ssv/protocol/v2/types" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -250,22 +249,7 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { // setupNetworkHandlers registers all the required handlers for sync protocols func (c *controller) setupNetworkHandlers() error { - syncHandlers := []*p2pprotocol.SyncHandler{ - p2pprotocol.WithHandler( - p2pprotocol.LastDecidedProtocol, - handlers.LastDecidedHandler(c.logger, c.ibftStorageMap, c.network), - ), - } - if c.validatorOptions.FullNode { - syncHandlers = append( - syncHandlers, - p2pprotocol.WithHandler( - p2pprotocol.DecidedHistoryProtocol, - // TODO: extract maxBatch to config - handlers.HistoryHandler(c.logger, c.ibftStorageMap, c.network, c.historySyncBatchSize), - ), - ) - } + syncHandlers := []*p2pprotocol.SyncHandler{} c.logger.Debug("setting up network handlers", zap.Int("count", len(syncHandlers)), zap.Bool("full_node", c.validatorOptions.FullNode), @@ -473,25 +457,7 @@ func (c *controller) setupNonCommitteeValidators() { pubKeys := make([][]byte, 0, len(nonCommitteeShares)) for _, validatorShare := range nonCommitteeShares { pubKeys = append(pubKeys, validatorShare.ValidatorPubKey) - - opts := c.validatorOptions - opts.SSVShare = validatorShare - allRoles := []spectypes.BeaconRole{ - spectypes.BNRoleAttester, - spectypes.BNRoleAggregator, - spectypes.BNRoleProposer, - spectypes.BNRoleSyncCommittee, - spectypes.BNRoleSyncCommitteeContribution, - } - for _, role := range allRoles { - messageID := spectypes.NewMsgID(ssvtypes.GetDefaultDomain(), validatorShare.ValidatorPubKey, role) - err := c.network.SyncHighestDecided(messageID) - if err != nil { - c.logger.Error("failed to sync highest decided", zap.Error(err)) - } - } } - if len(pubKeys) > 0 { c.logger.Debug("updating metadata for non-committee validators", zap.Int("count", len(pubKeys))) if err := beaconprotocol.UpdateValidatorsMetadata(c.logger, pubKeys, c, c.beacon, c.onMetadataUpdated); err != nil { @@ -872,7 +838,7 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt qbftCtrl := buildController(spectypes.BNRoleAttester, valCheck) runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) case spectypes.BNRoleProposer: - proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey, options.BuilderProposals) + proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.BNRoleProposer, proposedValueCheck) runners[role] = runner.NewProposerRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) runners[role].(*runner.ProposerRunner).ProducesBlindedBlocks = options.BuilderProposals // apply blinded block flag diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index 38121bae8e..e7bad286b0 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -54,6 +54,20 @@ func (mr 
*MockControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockController)(nil).AllActiveIndices), epoch) } +// CommitteeActiveIndices mocks base method. +func (m *MockController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. +func (mr *MockControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockController)(nil).CommitteeActiveIndices), epoch) +} + // ExecuteDuty mocks base method. func (m *MockController) ExecuteDuty(logger *zap.Logger, duty *types.Duty) { m.ctrl.T.Helper() @@ -126,20 +140,6 @@ func (mr *MockControllerMockRecorder) GetValidatorStats() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorStats", reflect.TypeOf((*MockController)(nil).GetValidatorStats)) } -// CommitteeActiveIndices mocks base method. -func (m *MockController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) - ret0, _ := ret[0].([]phase0.ValidatorIndex) - return ret0 -} - -// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. -func (mr *MockControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockController)(nil).CommitteeActiveIndices), epoch) -} - // IndicesChangeChan mocks base method. func (m *MockController) IndicesChangeChan() chan struct{} { m.ctrl.T.Helper() diff --git a/protocol/v2/blockchain/beacon/mock_client.go b/protocol/v2/blockchain/beacon/mock_client.go index 7360109bd1..2c8fa64f4d 100644 --- a/protocol/v2/blockchain/beacon/mock_client.go +++ b/protocol/v2/blockchain/beacon/mock_client.go @@ -643,6 +643,20 @@ func (mr *MockBeaconNodeMockRecorder) SubmitValidatorRegistration(pubkey, feeRec return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitValidatorRegistration", reflect.TypeOf((*MockBeaconNode)(nil).SubmitValidatorRegistration), pubkey, feeRecipient, sig) } +// SubmitVoluntaryExit mocks base method. +func (m *MockBeaconNode) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit, sig phase0.BLSSignature) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitVoluntaryExit", voluntaryExit, sig) + ret0, _ := ret[0].(error) + return ret0 +} + +// SubmitVoluntaryExit indicates an expected call of SubmitVoluntaryExit. +func (mr *MockBeaconNodeMockRecorder) SubmitVoluntaryExit(voluntaryExit, sig interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitVoluntaryExit", reflect.TypeOf((*MockBeaconNode)(nil).SubmitVoluntaryExit), voluntaryExit, sig) +} + // SyncCommitteeDuties mocks base method. 
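The regenerated MockBeaconNode above now includes SubmitVoluntaryExit; a test would stub it through the usual gomock recorder flow, roughly like this (hypothetical test fragment, assuming the generated NewMockBeaconNode constructor):

ctrl := gomock.NewController(t)
defer ctrl.Finish()
bn := beacon.NewMockBeaconNode(ctrl)
// expect exactly one voluntary-exit submission, with any message and signature
bn.EXPECT().SubmitVoluntaryExit(gomock.Any(), gomock.Any()).Return(nil)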
func (m *MockBeaconNode) SyncCommitteeDuties(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.SyncCommitteeDuty, error) { m.ctrl.T.Helper() diff --git a/protocol/v2/p2p/network.go b/protocol/v2/p2p/network.go index 8e9f99a78d..bd201dddda 100644 --- a/protocol/v2/p2p/network.go +++ b/protocol/v2/p2p/network.go @@ -132,21 +132,6 @@ func WithHandler(protocol SyncProtocol, handler RequestHandler) *SyncHandler { } } -// Syncer holds the interface for syncing data from other peers -type Syncer interface { - specqbft.Syncer - // GetHistory sync the given range from a set of peers that supports history for the given identifier - // it accepts a list of targets for the request. - GetHistory(logger *zap.Logger, mid spectypes.MessageID, from, to specqbft.Height, targets ...string) ([]SyncResult, specqbft.Height, error) - - // RegisterHandlers registers handler for the given protocol - RegisterHandlers(logger *zap.Logger, handlers ...*SyncHandler) - - // LastDecided fetches last decided from a random set of peers - // TODO: replace with specqbft.SyncHighestDecided - LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]SyncResult, error) -} - // MsgValidationResult helps other components to report message validation with a generic results scheme type MsgValidationResult int32 @@ -173,6 +158,8 @@ type ValidationReporting interface { type Network interface { Subscriber Broadcaster - Syncer ValidationReporting + + // RegisterHandlers registers handler for the given protocol + RegisterHandlers(logger *zap.Logger, handlers ...*SyncHandler) } diff --git a/protocol/v2/qbft/controller/controller.go b/protocol/v2/qbft/controller/controller.go index 84abc6600f..dd786dc993 100644 --- a/protocol/v2/qbft/controller/controller.go +++ b/protocol/v2/qbft/controller/controller.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" "encoding/json" + "fmt" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" @@ -24,14 +25,12 @@ type Controller struct { Identifier []byte Height specqbft.Height // incremental Height for InstanceContainer // StoredInstances stores the last HistoricalInstanceCapacity in an array for message processing purposes. 
- StoredInstances InstanceContainer - // FutureMsgsContainer holds all msgs from a higher height - FutureMsgsContainer map[spectypes.OperatorID]specqbft.Height // maps msg signer to height of higher height received msgs - Domain spectypes.DomainType - Share *spectypes.Share - NewDecidedHandler NewDecidedHandler `json:"-"` - config qbft.IConfig - fullNode bool + StoredInstances InstanceContainer + Domain spectypes.DomainType + Share *spectypes.Share + NewDecidedHandler NewDecidedHandler `json:"-"` + config qbft.IConfig + fullNode bool } func NewController( @@ -42,14 +41,13 @@ func NewController( fullNode bool, ) *Controller { return &Controller{ - Identifier: identifier, - Height: specqbft.FirstHeight, - Domain: domain, - Share: share, - StoredInstances: make(InstanceContainer, 0, InstanceContainerDefaultCapacity), - FutureMsgsContainer: make(map[spectypes.OperatorID]specqbft.Height), - config: config, - fullNode: fullNode, + Identifier: identifier, + Height: specqbft.FirstHeight, + Domain: domain, + Share: share, + StoredInstances: make(InstanceContainer, 0, InstanceContainerDefaultCapacity), + config: config, + fullNode: fullNode, } } @@ -100,10 +98,9 @@ func (c *Controller) ProcessMsg(logger *zap.Logger, msg *specqbft.SignedMessage) if IsDecidedMsg(c.Share, msg) { return c.UponDecided(logger, msg) } else if c.isFutureMessage(msg) { - return c.UponFutureMsg(logger, msg) - } else { - return c.UponExistingInstanceMsg(logger, msg) + return nil, fmt.Errorf("future msg from height, could not process") } + return c.UponExistingInstanceMsg(logger, msg) } func (c *Controller) UponExistingInstanceMsg(logger *zap.Logger, msg *specqbft.SignedMessage) (*specqbft.SignedMessage, error) { diff --git a/protocol/v2/qbft/controller/decided.go b/protocol/v2/qbft/controller/decided.go index 6c239a5a90..f9b694bc8e 100644 --- a/protocol/v2/qbft/controller/decided.go +++ b/protocol/v2/qbft/controller/decided.go @@ -67,8 +67,6 @@ func (c *Controller) UponDecided(logger *zap.Logger, msg *specqbft.SignedMessage } if isFutureDecided { - // sync gap - c.GetConfig().GetNetwork().SyncDecidedByRange(spectypes.MessageIDFromBytes(c.Identifier), c.Height, msg.Message.Height) // bump height c.Height = msg.Message.Height } diff --git a/protocol/v2/qbft/controller/future_msg.go b/protocol/v2/qbft/controller/future_msg.go deleted file mode 100644 index 7c93cd0fe2..0000000000 --- a/protocol/v2/qbft/controller/future_msg.go +++ /dev/null @@ -1,77 +0,0 @@ -package controller - -import ( - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/types" -) - -func (c *Controller) UponFutureMsg(logger *zap.Logger, msg *specqbft.SignedMessage) (*specqbft.SignedMessage, error) { - if err := ValidateFutureMsg(c.GetConfig(), msg, c.Share.Committee); err != nil { - return nil, errors.Wrap(err, "invalid future msg") - } - if !c.addHigherHeightMsg(msg) { - return nil, errors.New("discarded future msg") - } - if c.f1SyncTrigger() { - logger.Debug("🔀 triggered f+1 sync", - zap.Uint64("ctrl_height", uint64(c.Height)), - zap.Uint64("msg_height", uint64(msg.Message.Height))) - return nil, c.GetConfig().GetNetwork().SyncHighestDecided(spectypes.MessageIDFromBytes(c.Identifier)) - } - return nil, nil -} - -func ValidateFutureMsg( - config qbft.IConfig, - msg *specqbft.SignedMessage, - operators []*spectypes.Operator, -) error { - if err := msg.Validate(); err != nil { - return 
errors.Wrap(err, "invalid decided msg") - } - - if len(msg.GetSigners()) != 1 { - return errors.New("allows 1 signer") - } - - if config.VerifySignatures() { - if err := types.VerifyByOperators(msg.Signature, msg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") - } - } - - return nil -} - -// addHigherHeightMsg verifies msg, cleanup queue and adds the message if unique signer -func (c *Controller) addHigherHeightMsg(msg *specqbft.SignedMessage) bool { - // cleanup lower height msgs - cleanedQueue := make(map[spectypes.OperatorID]specqbft.Height) - signerExists := false - for signer, height := range c.FutureMsgsContainer { - if height <= c.Height { - continue - } - - if signer == msg.GetSigners()[0] { - signerExists = true - } - cleanedQueue[signer] = height - } - - if !signerExists { - cleanedQueue[msg.GetSigners()[0]] = msg.Message.Height - } - c.FutureMsgsContainer = cleanedQueue - return !signerExists -} - -// f1SyncTrigger returns true if received f+1 higher height messages from unique signers -func (c *Controller) f1SyncTrigger() bool { - return c.Share.HasPartialQuorum(len(c.FutureMsgsContainer)) -} diff --git a/protocol/v2/qbft/instance/instance.go b/protocol/v2/qbft/instance/instance.go index 7441a1df30..f0d99e92cd 100644 --- a/protocol/v2/qbft/instance/instance.go +++ b/protocol/v2/qbft/instance/instance.go @@ -95,13 +95,9 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh } func (i *Instance) Broadcast(logger *zap.Logger, msg *specqbft.SignedMessage) error { - // logger.Debug("Broadcast", - // zap.Any("MsgType", msg.Message.MsgType), - // fields.Round(msg.Message.Round), - // zap.Any("DataRound", msg.Message.DataRound), - // fields.Height(msg.Message.Height), - // ) - + if !i.CanProcessMessages() { + return errors.New("instance stopped processing messages") + } byts, err := msg.Encode() if err != nil { return errors.Wrap(err, "could not encode message") diff --git a/protocol/v2/qbft/instance/marshalutils.go b/protocol/v2/qbft/instance/marshalutils.go new file mode 100644 index 0000000000..ba76e75453 --- /dev/null +++ b/protocol/v2/qbft/instance/marshalutils.go @@ -0,0 +1,47 @@ +package instance + +import "encoding/json" + +/////////////////////// JSON Marshalling for Tests /////////////////////// + +// region: JSON Marshalling for Instance + +// MarshalJSON is a custom JSON marshaller for Instance +func (i *Instance) MarshalJSON() ([]byte, error) { + type Alias Instance + if i.forceStop { + return json.Marshal(&struct { + ForceStop bool `json:"forceStop"` + *Alias + }{ + ForceStop: i.forceStop, + Alias: (*Alias)(i), + }) + } else { + return json.Marshal(&struct { + *Alias + }{ + Alias: (*Alias)(i), + }) + } +} + +// UnmarshalJSON is a custom JSON unmarshaller for Instance +func (i *Instance) UnmarshalJSON(data []byte) error { + type Alias Instance + aux := &struct { + ForceStop *bool `json:"forceStop,omitempty"` + *Alias + }{ + Alias: (*Alias)(i), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ForceStop != nil { + i.forceStop = *aux.ForceStop + } + return nil +} + +// endregion: JSON Marshalling for Instance diff --git a/protocol/v2/qbft/spectest/controller_sync_type.go b/protocol/v2/qbft/spectest/controller_sync_type.go deleted file mode 100644 index 08fc7b2332..0000000000 --- a/protocol/v2/qbft/spectest/controller_sync_type.go +++ /dev/null @@ -1,55 +0,0 @@ -package qbft - -import ( - "encoding/hex" - "testing" - - 
qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" - "github.com/bloxapp/ssv/protocol/v2/types" - - "github.com/bloxapp/ssv-spec/qbft/spectest/tests/controller/futuremsg" - spectypes "github.com/bloxapp/ssv-spec/types" - spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/logging/fields" - "github.com/stretchr/testify/require" -) - -func RunControllerSync(t *testing.T, test *futuremsg.ControllerSyncSpecTest) { - logger := logging.TestLogger(t) - identifier := spectypes.NewMsgID(types.GetDefaultDomain(), spectestingutils.TestingValidatorPubKey[:], spectypes.BNRoleAttester) - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), identifier.GetRoleType()) - contr := qbfttesting.NewTestingQBFTController( - identifier[:], - spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), - config, - false, - ) - - err := contr.StartNewInstance(logger, 0, []byte{1, 2, 3, 4}) - if err != nil { - t.Fatalf(err.Error()) - } - - var lastErr error - for _, msg := range test.InputMessages { - logger = logger.With(fields.Height(msg.Message.Height)) - _, err := contr.ProcessMsg(logger, msg) - if err != nil { - lastErr = err - } - } - - syncedDecidedCnt := config.GetNetwork().(*spectestingutils.TestingNetwork).SyncHighestDecidedCnt - require.EqualValues(t, test.SyncDecidedCalledCnt, syncedDecidedCnt) - - r, err := contr.GetRoot() - require.NoError(t, err) - require.EqualValues(t, test.ControllerPostRoot, hex.EncodeToString(r[:])) - - if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) - } else { - require.NoError(t, lastErr) - } -} diff --git a/protocol/v2/qbft/spectest/controller_type.go b/protocol/v2/qbft/spectest/controller_type.go index b9e03e6197..a919cc104b 100644 --- a/protocol/v2/qbft/spectest/controller_type.go +++ b/protocol/v2/qbft/spectest/controller_type.go @@ -3,6 +3,10 @@ package qbft import ( "bytes" "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" "reflect" "testing" @@ -10,6 +14,7 @@ import ( spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/stretchr/testify/require" "go.uber.org/zap" @@ -18,22 +23,23 @@ import ( "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { + //temporary to override state comparisons from file not inputted one + overrideStateComparisonForControllerSpecTest(t, test) + logger := logging.TestLogger(t) - identifier := []byte{1, 2, 3, 4} - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.BNRoleAttester) - contr := qbfttesting.NewTestingQBFTController( - identifier[:], - spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), - config, - false, - ) + contr := generateController(logger) var lastErr error for i, runData := range test.RunInstanceData { - if err := runInstanceWithData(t, logger, specqbft.Height(i), contr, config, identifier, runData); err != nil { + height := specqbft.Height(i) + if runData.Height != nil { + height = *runData.Height + } + if err := runInstanceWithData(t, 
logger, height, contr, runData); err != nil { lastErr = err } } @@ -45,6 +51,17 @@ func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { } } +func generateController(logger *zap.Logger) *controller.Controller { + identifier := []byte{1, 2, 3, 4} + config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.BNRoleAttester) + return qbfttesting.NewTestingQBFTController( + identifier[:], + spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), + config, + false, + ) +} + func testTimer( t *testing.T, config *qbft.Config, @@ -80,13 +97,6 @@ func testProcessMsg( } require.EqualValues(t, runData.ExpectedDecidedState.DecidedCnt, decidedCnt, lastErr) - // verify sync decided by range calls - if runData.ExpectedDecidedState.CalledSyncDecidedByRange { - require.EqualValues(t, runData.ExpectedDecidedState.DecidedByRangeValues, config.GetNetwork().(*spectestingutils.TestingNetwork).DecidedByRange) - } else { - require.EqualValues(t, [2]specqbft.Height{0, 0}, config.GetNetwork().(*spectestingutils.TestingNetwork).DecidedByRange) - } - return lastErr } @@ -130,20 +140,20 @@ func testBroadcastedDecided( } } -func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Height, contr *controller.Controller, config *qbft.Config, identifier []byte, runData *spectests.RunInstanceData) error { +func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Height, contr *controller.Controller, runData *spectests.RunInstanceData) error { err := contr.StartNewInstance(logger, height, runData.InputValue) var lastErr error if err != nil { lastErr = err } - testTimer(t, config, runData) + testTimer(t, contr.GetConfig().(*qbft.Config), runData) - if err := testProcessMsg(t, logger, contr, config, runData); err != nil { + if err := testProcessMsg(t, logger, contr, contr.GetConfig().(*qbft.Config), runData); err != nil { lastErr = err } - testBroadcastedDecided(t, config, identifier, runData) + testBroadcastedDecided(t, contr.GetConfig().(*qbft.Config), contr.Identifier, runData) // test root r, err := contr.GetRoot() @@ -152,3 +162,24 @@ func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Heigh return lastErr } + +func overrideStateComparisonForControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest")) + require.NoError(t, err) + specDir = filepath.Join(specDir, "generate") + dir := typescomparable.GetSCDir(specDir, reflect.TypeOf(test).String()) + path := filepath.Join(dir, fmt.Sprintf("%s.json", test.TestName())) + byteValue, err := os.ReadFile(filepath.Clean(path)) + require.NoError(t, err) + sc := make([]*controller.Controller, len(test.RunInstanceData)) + require.NoError(t, json.Unmarshal(byteValue, &sc)) + + for i, runData := range test.RunInstanceData { + runData.ControllerPostState = sc[i] + + r, err := sc[i].GetRoot() + require.NoError(t, err) + + runData.ControllerPostRoot = hex.EncodeToString(r[:]) + } +} diff --git a/protocol/v2/qbft/spectest/msg_processing_type.go b/protocol/v2/qbft/spectest/msg_processing_type.go index 63c8922862..15606c2ece 100644 --- a/protocol/v2/qbft/spectest/msg_processing_type.go +++ b/protocol/v2/qbft/spectest/msg_processing_type.go @@ -3,6 +3,8 @@ package qbft import ( "encoding/hex" "fmt" + "path/filepath" + "reflect" "testing" "time" @@ -10,15 +12,19 @@ import ( spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" spectypes 
"github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" "github.com/stretchr/testify/require" ) // RunMsgProcessing processes MsgProcessingSpecTest. It probably may be removed. func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { + overrideStateComparisonForMsgProcessingSpecTest(t, test) + // a little trick we do to instantiate all the internal instance params preByts, _ := test.Pre.Encode() msgId := specqbft.ControllerIdToMessageID(test.Pre.State.ID) @@ -49,7 +55,7 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { } if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) + require.EqualError(t, lastErr, test.ExpectedError, "expected %v, but got %v", test.ExpectedError, lastErr) } else { require.NoError(t, lastErr) } @@ -78,3 +84,22 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { require.EqualValues(t, test.PostRoot, hex.EncodeToString(postRoot[:]), "post root not valid") } + +func overrideStateComparisonForMsgProcessingSpecTest(t *testing.T, test *spectests.MsgProcessingSpecTest) { + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest")) + require.NoError(t, err) + test.PostState, err = typescomparable.UnmarshalStateComparison(specDir, test.TestName(), + reflect.TypeOf(test).String(), + &specqbft.State{}) + require.NoError(t, err) + + r, err := test.PostState.GetRoot() + require.NoError(t, err) + + // backwards compatability test, hard coded post root must be equal to the one loaded from file + if len(test.PostRoot) > 0 { + require.EqualValues(t, test.PostRoot, hex.EncodeToString(r[:])) + } + + test.PostRoot = hex.EncodeToString(r[:]) +} diff --git a/protocol/v2/qbft/spectest/qbft_mapping_test.go b/protocol/v2/qbft/spectest/qbft_mapping_test.go index 082d06d54a..00903a0adc 100644 --- a/protocol/v2/qbft/spectest/qbft_mapping_test.go +++ b/protocol/v2/qbft/spectest/qbft_mapping_test.go @@ -8,7 +8,6 @@ import ( "testing" spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" - "github.com/bloxapp/ssv-spec/qbft/spectest/tests/controller/futuremsg" "github.com/bloxapp/ssv-spec/qbft/spectest/tests/timeout" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" @@ -92,16 +91,6 @@ func TestQBFTMapping(t *testing.T) { /*t.Run(typedTest.TestName(), func(t *testing.T) { RunMsg(t, typedTest) })*/ - case reflect.TypeOf(&futuremsg.ControllerSyncSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &futuremsg.ControllerSyncSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { - t.Parallel() - RunControllerSync(t, typedTest) - }) case reflect.TypeOf(&timeout.SpecTest{}).String(): byts, err := json.Marshal(test) require.NoError(t, err) diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index ac305585e2..1fc2225e15 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -97,6 +97,9 @@ func NewBaseRunner( // baseStartNewDuty is a base func that all runner implementation can call to start 
a duty func (b *BaseRunner) baseStartNewDuty(logger *zap.Logger, runner Runner, duty *spectypes.Duty) error { + if err := b.ShouldProcessDuty(duty); err != nil { + return errors.Wrap(err, "can't start duty") + } b.baseSetupForNewDuty(duty) return runner.executeDuty(logger, duty) } @@ -266,3 +269,11 @@ func (b *BaseRunner) hasRunningDuty() bool { } return !b.State.Finished } + +func (b *BaseRunner) ShouldProcessDuty(duty *spectypes.Duty) error { + if b.QBFTController.Height >= specqbft.Height(duty.Slot) { + return errors.Errorf("duty for slot %d already passed. Current height is %d", duty.Slot, + b.QBFTController.Height) + } + return nil +} diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index 10bf6a39fe..8cbf53fab2 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -53,7 +53,13 @@ func NewValidatorRegistrationRunner( } func (r *ValidatorRegistrationRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - return r.BaseRunner.baseStartNewDuty(logger, r, duty) + // Note: Unlike the other runners, this doesn't call BaseRunner.baseStartNewDuty because + // that requires a QBFTController which ValidatorRegistrationRunner doesn't have. + if r.HasRunningDuty() { + return errors.New("already running duty") + } + r.BaseRunner.baseSetupForNewDuty(duty) + return r.executeDuty(logger, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go new file mode 100644 index 0000000000..7eba30c616 --- /dev/null +++ b/protocol/v2/ssv/runner/voluntary_exit.go @@ -0,0 +1,232 @@ +package runner + +import ( + "crypto/sha256" + "encoding/json" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + specssv "github.com/bloxapp/ssv-spec/ssv" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/protocol/v2/ssv/runner/metrics" + ssz "github.com/ferranbt/fastssz" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +// Duty runner for validator voluntary exit duty +type VoluntaryExitRunner struct { + BaseRunner *BaseRunner + + beacon specssv.BeaconNode + network specssv.Network + signer spectypes.KeyManager + valCheck specqbft.ProposedValueCheckF + + voluntaryExit *phase0.VoluntaryExit + + metrics metrics.ConsensusMetrics +} + +func NewVoluntaryExitRunner( + beaconNetwork spectypes.BeaconNetwork, + share *spectypes.Share, + beacon specssv.BeaconNode, + network specssv.Network, + signer spectypes.KeyManager, +) Runner { + return &VoluntaryExitRunner{ + BaseRunner: &BaseRunner{ + BeaconRoleType: spectypes.BNRoleVoluntaryExit, + BeaconNetwork: beaconNetwork, + Share: share, + }, + + beacon: beacon, + network: network, + signer: signer, + metrics: metrics.NewConsensusMetrics(spectypes.BNRoleValidatorRegistration), + } +} + +func (r *VoluntaryExitRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { + // Note: Unlike the other runners, this doesn't call BaseRunner.baseStartNewDuty because + // that requires a QBFTController which VoluntaryExitRunner doesn't have. 
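The new ShouldProcessDuty guard above encodes a simple invariant: a duty may only start if its slot is strictly greater than the QBFT controller's current height, since heights advance together with duty slots for these runners. A minimal illustration (illustrative helper, not part of the patch):

func shouldProcessDutySketch(controllerHeight, dutySlot uint64) error {
	// a duty whose slot is at or below the controller's height was already
	// started (or decided), so starting it again is rejected
	if controllerHeight >= dutySlot {
		return fmt.Errorf("duty for slot %d already passed, current height is %d", dutySlot, controllerHeight)
	}
	return nil
}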
+ if r.HasRunningDuty() { + return errors.New("already running duty") + } + r.BaseRunner.baseSetupForNewDuty(duty) + return r.executeDuty(logger, duty) +} + +// HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) +func (r *VoluntaryExitRunner) HasRunningDuty() bool { + return r.BaseRunner.hasRunningDuty() +} + +// Check for quorum of partial signatures over VoluntaryExit and, +// if has quorum, constructs SignedVoluntaryExit and submits to BeaconNode +func (r *VoluntaryExitRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { + quorum, roots, err := r.BaseRunner.basePreConsensusMsgProcessing(r, signedMsg) + if err != nil { + return errors.Wrap(err, "failed processing voluntary exit message") + } + + // quorum returns true only once (first time quorum achieved) + if !quorum { + return nil + } + + // only 1 root, verified in basePreConsensusMsgProcessing + root := roots[0] + fullSig, err := r.GetState().ReconstructBeaconSig(r.GetState().PreConsensusContainer, root, r.GetShare().ValidatorPubKey) + if err != nil { + return errors.Wrap(err, "could not reconstruct voluntary exit sig") + } + specSig := phase0.BLSSignature{} + copy(specSig[:], fullSig) + + // create SignedVoluntaryExit using VoluntaryExit created on r.executeDuty() and reconstructed signature + signedVoluntaryExit := &phase0.SignedVoluntaryExit{ + Message: r.voluntaryExit, + Signature: specSig, + } + + if err := r.beacon.SubmitVoluntaryExit(signedVoluntaryExit, specSig); err != nil { + return errors.Wrap(err, "could not submit voluntary exit") + } + + r.GetState().Finished = true + return nil +} + +func (r *VoluntaryExitRunner) ProcessConsensus(logger *zap.Logger, signedMsg *specqbft.SignedMessage) error { + return errors.New("no consensus phase for voluntary exit") +} + +func (r *VoluntaryExitRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { + return errors.New("no post consensus phase for voluntary exit") +} + +func (r *VoluntaryExitRunner) expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { + vr, err := r.calculateVoluntaryExit() + if err != nil { + return nil, spectypes.DomainError, errors.Wrap(err, "could not calculate voluntary exit") + } + return []ssz.HashRoot{vr}, spectypes.DomainVoluntaryExit, nil +} + +// expectedPostConsensusRootsAndDomain an INTERNAL function, returns the expected post-consensus roots to sign +func (r *VoluntaryExitRunner) expectedPostConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { + return nil, [4]byte{}, errors.New("no post consensus roots for voluntary exit") +} + +// Validator voluntary exit duty doesn't need consensus nor post-consensus. 
+// It just performs pre-consensus with VoluntaryExitPartialSig over +// a VoluntaryExit object to create a SignedVoluntaryExit +func (r *VoluntaryExitRunner) executeDuty(logger *zap.Logger, duty *spectypes.Duty) error { + voluntaryExit, err := r.calculateVoluntaryExit() + if err != nil { + return errors.Wrap(err, "could not calculate voluntary exit") + } + + // get PartialSignatureMessage with voluntaryExit root and signature + msg, err := r.BaseRunner.signBeaconObject(r, voluntaryExit, duty.Slot, spectypes.DomainVoluntaryExit) + if err != nil { + return errors.Wrap(err, "could not sign VoluntaryExit object") + } + + msgs := spectypes.PartialSignatureMessages{ + Type: spectypes.VoluntaryExitPartialSig, + Slot: duty.Slot, + Messages: []*spectypes.PartialSignatureMessage{msg}, + } + + // sign PartialSignatureMessages object + signature, err := r.GetSigner().SignRoot(msgs, spectypes.PartialSignatureType, r.GetShare().SharePubKey) + if err != nil { + return errors.Wrap(err, "could not sign randao msg") + } + signedPartialMsg := &spectypes.SignedPartialSignatureMessage{ + Message: msgs, + Signature: signature, + Signer: r.GetShare().OperatorID, + } + + // broadcast + data, err := signedPartialMsg.Encode() + if err != nil { + return errors.Wrap(err, "failed to encode signedPartialMsg with VoluntaryExit") + } + msgToBroadcast := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(r.GetShare().DomainType, r.GetShare().ValidatorPubKey, r.BaseRunner.BeaconRoleType), + Data: data, + } + if err := r.GetNetwork().Broadcast(msgToBroadcast); err != nil { + return errors.Wrap(err, "can't broadcast signedPartialMsg with VoluntaryExit") + } + + // stores value for later using in ProcessPreConsensus + r.voluntaryExit = voluntaryExit + + return nil +} + +// Returns *phase0.VoluntaryExit object with current epoch and own validator index +func (r *VoluntaryExitRunner) calculateVoluntaryExit() (*phase0.VoluntaryExit, error) { + epoch := r.BaseRunner.BeaconNetwork.EstimatedEpochAtSlot(r.BaseRunner.State.StartingDuty.Slot) + validatorIndex := r.GetState().StartingDuty.ValidatorIndex + return &phase0.VoluntaryExit{ + Epoch: epoch, + ValidatorIndex: validatorIndex, + }, nil +} + +func (r *VoluntaryExitRunner) GetBaseRunner() *BaseRunner { + return r.BaseRunner +} + +func (r *VoluntaryExitRunner) GetNetwork() specssv.Network { + return r.network +} + +func (r *VoluntaryExitRunner) GetBeaconNode() specssv.BeaconNode { + return r.beacon +} + +func (r *VoluntaryExitRunner) GetShare() *spectypes.Share { + return r.BaseRunner.Share +} + +func (r *VoluntaryExitRunner) GetState() *State { + return r.BaseRunner.State +} + +func (r *VoluntaryExitRunner) GetValCheckF() specqbft.ProposedValueCheckF { + return r.valCheck +} + +func (r *VoluntaryExitRunner) GetSigner() spectypes.KeyManager { + return r.signer +} + +// Encode returns the encoded struct in bytes or error +func (r *VoluntaryExitRunner) Encode() ([]byte, error) { + return json.Marshal(r) +} + +// Decode returns error if decoding failed +func (r *VoluntaryExitRunner) Decode(data []byte) error { + return json.Unmarshal(data, &r) +} + +// GetRoot returns the root used for signing and verification +func (r *VoluntaryExitRunner) GetRoot() ([32]byte, error) { + marshaledRoot, err := r.Encode() + if err != nil { + return [32]byte{}, errors.Wrap(err, "could not encode DutyRunnerState") + } + ret := sha256.Sum256(marshaledRoot) + return ret, nil +} diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go 
b/protocol/v2/ssv/spectest/msg_processing_type.go index e4cbe76036..412b92b8da 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -2,6 +2,9 @@ package spectest import ( "encoding/hex" + "path/filepath" + "reflect" + "strings" "testing" specqbft "github.com/bloxapp/ssv-spec/qbft" @@ -9,12 +12,15 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" + "go.uber.org/zap" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" ssvtesting "github.com/bloxapp/ssv/protocol/v2/ssv/testing" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) type MsgProcessingSpecTest struct { @@ -23,6 +29,7 @@ type MsgProcessingSpecTest struct { Duty *spectypes.Duty Messages []*spectypes.SSVMessage PostDutyRunnerStateRoot string + PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json // OutputMessages compares pre/ post signed partial sigs to output. We exclude consensus msgs as it's tested in consensus OutputMessages []*spectypes.SignedPartialSignatureMessage BeaconBroadcastedRoots []string @@ -36,7 +43,11 @@ func (test *MsgProcessingSpecTest) TestName() string { func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { logger := logging.TestLogger(t) + test.overrideStateComparison(t) + test.RunAsPartOfMultiTest(t, logger) +} +func (test *MsgProcessingSpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { test.Runner.GetBaseRunner().VerifySignatures = true v := ssvtesting.BaseValidator(logger, spectestingutils.KeySetForShare(test.Runner.GetBaseRunner().Share)) @@ -146,3 +157,43 @@ func (test *MsgProcessingSpecTest) compareOutputMsgs(t *testing.T, v *validator. 
index++ } } + +func (test *MsgProcessingSpecTest) overrideStateComparison(t *testing.T) { + testType := reflect.TypeOf(test).String() + testType = strings.Replace(testType, "spectest.", "tests.", 1) + overrideStateComparison(t, test, test.Name, testType) +} + +func overrideStateComparison(t *testing.T, test *MsgProcessingSpecTest, name string, testType string) { + var r runner.Runner + switch test.Runner.(type) { + case *runner.AttesterRunner: + r = &runner.AttesterRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case *runner.SyncCommitteeRunner: + r = &runner.SyncCommitteeRunner{} + case *runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + // override + test.PostDutyRunnerState = r + + root, err := r.GetRoot() + require.NoError(t, err) + + test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) +} diff --git a/protocol/v2/ssv/spectest/multi_msg_processing_type.go b/protocol/v2/ssv/spectest/multi_msg_processing_type.go index 0b4b926f6e..4d040782e2 100644 --- a/protocol/v2/ssv/spectest/multi_msg_processing_type.go +++ b/protocol/v2/ssv/spectest/multi_msg_processing_type.go @@ -1,10 +1,20 @@ package spectest -import "testing" +import ( + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/bloxapp/ssv/logging" + "go.uber.org/zap" +) type MultiMsgProcessingSpecTest struct { Name string Tests []*MsgProcessingSpecTest + + logger *zap.Logger } func (tests *MultiMsgProcessingSpecTest) TestName() string { @@ -12,10 +22,23 @@ func (tests *MultiMsgProcessingSpecTest) TestName() string { } func (tests *MultiMsgProcessingSpecTest) Run(t *testing.T) { + tests.logger = logging.TestLogger(t) + tests.overrideStateComparison(t) + for _, test := range tests.Tests { - test := test t.Run(test.TestName(), func(t *testing.T) { - RunMsgProcessing(t, test) + test.RunAsPartOfMultiTest(t, tests.logger) }) } } + +// overrideStateComparison overrides the post state comparison for all tests in the multi test +func (tests *MultiMsgProcessingSpecTest) overrideStateComparison(t *testing.T) { + testsName := strings.ReplaceAll(tests.TestName(), " ", "_") + for _, test := range tests.Tests { + path := filepath.Join(testsName, test.TestName()) + testType := reflect.TypeOf(tests).String() + testType = strings.Replace(testType, "spectest.", "tests.", 1) + overrideStateComparison(t, test, path, testType) + } +} diff --git a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go index c8bf0cae80..cfac13ec9d 100644 --- a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go +++ b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go @@ -2,14 +2,19 @@ package spectest import ( "encoding/hex" + "path/filepath" + "reflect" + "strings" "testing" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/stretchr/testify/require" "go.uber.org/zap" 
"github.com/bloxapp/ssv/protocol/v2/ssv/runner" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) type StartNewRunnerDutySpecTest struct { @@ -17,6 +22,7 @@ type StartNewRunnerDutySpecTest struct { Runner runner.Runner Duty *spectypes.Duty PostDutyRunnerStateRoot string + PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json OutputMessages []*spectypes.SignedPartialSignatureMessage ExpectedError string } @@ -25,7 +31,14 @@ func (test *StartNewRunnerDutySpecTest) TestName() string { return test.Name } -func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { +// overrideStateComparison overrides the state comparison to compare the runner state +func (test *StartNewRunnerDutySpecTest) overrideStateComparison(t *testing.T) { + testType := reflect.TypeOf(test).String() + testType = strings.Replace(testType, "spectest.", "newduty.", 1) + overrideStateComparisonForStartNewRunnerDutySpecTest(t, test, test.Name, testType) +} + +func (test *StartNewRunnerDutySpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { err := test.Runner.StartNewDuty(logger, test.Duty) if len(test.ExpectedError) > 0 { require.EqualError(t, err, test.ExpectedError) @@ -84,6 +97,11 @@ func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { require.EqualValues(t, test.PostDutyRunnerStateRoot, hex.EncodeToString(postRoot[:])) } +func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { + test.overrideStateComparison(t) + test.RunAsPartOfMultiTest(t, logger) +} + type MultiStartNewRunnerDutySpecTest struct { Name string Tests []*StartNewRunnerDutySpecTest @@ -94,10 +112,56 @@ func (tests *MultiStartNewRunnerDutySpecTest) TestName() string { } func (tests *MultiStartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { + tests.overrideStateComparison(t) + for _, test := range tests.Tests { - test := test t.Run(test.TestName(), func(t *testing.T) { - test.Run(t, logger) + test.RunAsPartOfMultiTest(t, logger) }) } } + +// overrideStateComparison overrides the post state comparison for all tests in the multi test +func (tests *MultiStartNewRunnerDutySpecTest) overrideStateComparison(t *testing.T) { + testsName := strings.ReplaceAll(tests.TestName(), " ", "_") + for _, test := range tests.Tests { + path := filepath.Join(testsName, test.TestName()) + testType := reflect.TypeOf(tests).String() + testType = strings.Replace(testType, "spectest.", "newduty.", 1) + overrideStateComparisonForStartNewRunnerDutySpecTest(t, test, path, testType) + } +} + +func overrideStateComparisonForStartNewRunnerDutySpecTest(t *testing.T, test *StartNewRunnerDutySpecTest, name string, testType string) { + var r runner.Runner + switch test.Runner.(type) { + case *runner.AttesterRunner: + r = &runner.AttesterRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case *runner.SyncCommitteeRunner: + r = &runner.SyncCommitteeRunner{} + case *runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + 
// override + test.PostDutyRunnerState = r + + root, err := r.GetRoot() + require.NoError(t, err) + + test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) +} diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go index ba90b22767..14fac24b35 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -69,8 +69,9 @@ func prepareTest(t *testing.T, logger *zap.Logger, name string, test interface{} typedTest := &MsgProcessingSpecTest{ Runner: &runner.AttesterRunner{}, } - // TODO fix blinded test + // TODO: fix blinded test if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { + logger.Info("skipping blinded block test", zap.String("test", testName)) return nil } require.NoError(t, json.Unmarshal(byts, &typedTest)) @@ -346,6 +347,10 @@ func baseRunnerForRole(logger *zap.Logger, role spectypes.BeaconRole, base *runn ret := ssvtesting.ValidatorRegistrationRunner(logger, ks) ret.(*runner.ValidatorRegistrationRunner).BaseRunner = base return ret + case spectypes.BNRoleVoluntaryExit: + ret := ssvtesting.VoluntaryExitRunner(logger, ks) + ret.(*runner.VoluntaryExitRunner).BaseRunner = base + return ret case testingutils.UnknownDutyType: ret := ssvtesting.UnknownDutyTypeRunner(logger, ks) ret.(*runner.AttesterRunner).BaseRunner = base diff --git a/protocol/v2/ssv/testing/runner.go b/protocol/v2/ssv/testing/runner.go index 2d8fcc8095..7689d10073 100644 --- a/protocol/v2/ssv/testing/runner.go +++ b/protocol/v2/ssv/testing/runner.go @@ -23,14 +23,14 @@ var AttesterRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySe //} var ProposerRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { - return baseRunner(logger, spectypes.BNRoleProposer, specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil, true), keySet) + return baseRunner(logger, spectypes.BNRoleProposer, specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil), keySet) } var ProposerBlindedBlockRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { ret := baseRunner( logger, spectypes.BNRoleProposer, - specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil, true), + specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil), keySet, ) ret.(*runner.ProposerRunner).ProducesBlindedBlocks = true @@ -54,6 +54,10 @@ var ValidatorRegistrationRunner = func(logger *zap.Logger, keySet *spectestingut return ret } +var VoluntaryExitRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { + return baseRunner(logger, spectypes.BNRoleVoluntaryExit, nil, keySet) +} + var UnknownDutyTypeRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { return baseRunner(logger, spectestingutils.UnknownDutyType, spectestingutils.UnknownDutyValueCheck(), keySet) } @@ -144,6 +148,14 @@ var baseRunner = func(logger *zap.Logger, role 
spectypes.BeaconRole, valCheck sp net, km, ) + case spectypes.BNRoleVoluntaryExit: + return runner.NewVoluntaryExitRunner( + spectypes.BeaconTestNetwork, + share, + spectestingutils.NewTestingBeaconNode(), + net, + km, + ) case spectestingutils.UnknownDutyType: ret := runner.NewAttesterRunnner( spectypes.BeaconTestNetwork, diff --git a/protocol/v2/ssv/validator/startup.go b/protocol/v2/ssv/validator/startup.go index 4ca2c8acea..b316e8c9f2 100644 --- a/protocol/v2/ssv/validator/startup.go +++ b/protocol/v2/ssv/validator/startup.go @@ -1,9 +1,7 @@ package validator import ( - "context" "sync/atomic" - "time" "github.com/bloxapp/ssv-spec/p2p" spectypes "github.com/bloxapp/ssv-spec/types" @@ -56,7 +54,6 @@ func (v *Validator) Start(logger *zap.Logger) (started bool, err error) { return true, err } go v.StartQueueConsumer(logger, identifier, v.ProcessMessage) - go v.sync(logger, identifier) } return true, nil } @@ -73,27 +70,3 @@ func (v *Validator) Stop() { v.Queues = make(map[spectypes.BeaconRole]queueContainer) } } - -// sync performs highest decided sync -func (v *Validator) sync(logger *zap.Logger, mid spectypes.MessageID) { - ctx, cancel := context.WithCancel(v.ctx) - defer cancel() - - // TODO: config? - interval := time.Second - retries := 3 - - for ctx.Err() == nil { - err := v.Network.SyncHighestDecided(mid) - if err != nil { - logger.Debug("❌ failed to sync highest decided", zap.Error(err)) - retries-- - if retries > 0 { - interval *= 2 - time.Sleep(interval) - continue - } - } - return - } -} diff --git a/protocol/v2/sync/handlers/decided_history.go b/protocol/v2/sync/handlers/decided_history.go deleted file mode 100644 index 3dc960cfcb..0000000000 --- a/protocol/v2/sync/handlers/decided_history.go +++ /dev/null @@ -1,57 +0,0 @@ -package handlers - -import ( - "fmt" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/ibft/storage" - "github.com/bloxapp/ssv/protocol/v2/message" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" -) - -// HistoryHandler handler for decided history protocol -// TODO: add msg validation and report scores -func HistoryHandler(logger *zap.Logger, storeMap *storage.QBFTStores, reporting protocolp2p.ValidationReporting, maxBatchSize int) protocolp2p.RequestHandler { - return func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { - logger := logger.With(zap.String("msg_id", fmt.Sprintf("%x", msg.MsgID))) - sm := &message.SyncMessage{} - err := sm.Decode(msg.Data) - if err != nil { - logger.Debug("❌ failed to decode message data", zap.Error(err)) - reporting.ReportValidation(logger, msg, protocolp2p.ValidationRejectLow) - sm.Status = message.StatusBadRequest - } else if sm.Protocol != message.DecidedHistoryType { - // not this protocol - // TODO: remove after v0 - return nil, nil - } else { - items := int(sm.Params.Height[1] - sm.Params.Height[0]) - if items > maxBatchSize { - sm.Params.Height[1] = sm.Params.Height[0] + specqbft.Height(maxBatchSize) - } - msgID := msg.GetID() - store := storeMap.Get(msgID.GetRoleType()) - if store == nil { - return nil, errors.New(fmt.Sprintf("not storage found for type %s", msgID.GetRoleType().String())) - } - instances, err := store.GetInstancesInRange(msgID[:], sm.Params.Height[0], sm.Params.Height[1]) - results := make([]*specqbft.SignedMessage, 0, len(instances)) - for _, instance := range instances { - results = append(results, instance.DecidedMessage) - } - sm.UpdateResults(err, results...) 
- } - - data, err := sm.Encode() - if err != nil { - return nil, errors.Wrap(err, "could not encode result data") - } - msg.Data = data - - return msg, nil - } -} diff --git a/protocol/v2/sync/handlers/last_decided.go b/protocol/v2/sync/handlers/last_decided.go deleted file mode 100644 index 6b33579b0f..0000000000 --- a/protocol/v2/sync/handlers/last_decided.go +++ /dev/null @@ -1,53 +0,0 @@ -package handlers - -import ( - "fmt" - - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/ibft/storage" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/protocol/v2/message" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" -) - -// LastDecidedHandler handler for last-decided protocol -// TODO: add msg validation and report scores -func LastDecidedHandler(plogger *zap.Logger, storeMap *storage.QBFTStores, reporting protocolp2p.ValidationReporting) protocolp2p.RequestHandler { - return func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { - logger := plogger.With(fields.PubKey(msg.MsgID.GetPubKey())) - sm := &message.SyncMessage{} - err := sm.Decode(msg.Data) - if err != nil { - logger.Debug("❌ failed to decode message data", zap.Error(err)) - reporting.ReportValidation(logger, msg, protocolp2p.ValidationRejectLow) - sm.Status = message.StatusBadRequest - } else if sm.Protocol != message.LastDecidedType { - // not this protocol - // TODO: remove after v0 - return nil, nil - } else { - msgID := msg.GetID() - store := storeMap.Get(msgID.GetRoleType()) - if store == nil { - return nil, errors.New(fmt.Sprintf("not storage found for type %s", msgID.GetRoleType().String())) - } - instance, err := store.GetHighestInstance(msgID[:]) - if err != nil { - logger.Debug("❗ failed to get highest instance", zap.Error(err)) - } else if instance != nil { - sm.UpdateResults(err, instance.DecidedMessage) - } - } - - data, err := sm.Encode() - if err != nil { - return nil, errors.Wrap(err, "could not encode result data") - } - msg.Data = data - - return msg, nil - } -} diff --git a/protocol/v2/testing/test_utils.go b/protocol/v2/testing/test_utils.go index 2b2f79e4c1..7994e60361 100644 --- a/protocol/v2/testing/test_utils.go +++ b/protocol/v2/testing/test_utils.go @@ -1,6 +1,7 @@ package testing import ( + "fmt" "os" "path" "path/filepath" @@ -145,9 +146,25 @@ func AggregateInvalidSign(t *testing.T, sks map[spectypes.OperatorID]*bls.Secret } func GetSpecTestJSON(path string, module string) ([]byte, error) { + p, err := GetSpecDir(path, module) + if err != nil { + return nil, fmt.Errorf("could not get spec test dir: %w", err) + } + return os.ReadFile(filepath.Join(filepath.Clean(p), filepath.Clean(specTestPath))) +} + +// GetSpecDir returns the path to the ssv-spec module. 
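// A typical call, as the spec-test wrappers in this patch use it:
//
//	specDir, err := protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest"))
//	// then e.g. filepath.Join(specDir, "generate") for the comparison JSONs
//
// An empty path falls back to the current working directory's go.mod.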
+func GetSpecDir(path, module string) (string, error) { + if path == "" { + var err error + path, err = os.Getwd() + if err != nil { + return "", errors.New("could not get current directory") + } + } goModFile, err := getGoModFile(path) if err != nil { - return nil, errors.New("could not get go.mod file") + return "", errors.New("could not get go.mod file") } // check if there is a replace @@ -173,7 +190,7 @@ func GetSpecTestJSON(path string, module string) ([]byte, error) { } } if req == nil { - return nil, errors.Errorf("could not find %s module", specModule) + return "", errors.Errorf("could not find %s module", specModule) } modPath = req.Mod.Path modVersion = req.Mod.Version @@ -182,14 +199,14 @@ func GetSpecTestJSON(path string, module string) ([]byte, error) { // get module path p, err := GetModulePath(modPath, modVersion) if err != nil { - return nil, errors.Wrap(err, "could not get module path") + return "", errors.Wrap(err, "could not get module path") } if _, err := os.Stat(p); os.IsNotExist(err) { - return nil, errors.Wrapf(err, "you don't have this module-%s/version-%s installed", modPath, modVersion) + return "", errors.Wrapf(err, "you don't have this module-%s/version-%s installed", modPath, modVersion) } - return os.ReadFile(filepath.Join(filepath.Clean(p), filepath.Clean(module), filepath.Clean(specTestPath))) + return filepath.Join(filepath.Clean(p), module), nil } func GetModulePath(name, version string) (string, error) { diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index 7369391e50..2440971fe0 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -11,7 +11,7 @@ ApprovedChanges: ["256a3dc0f1eb7abf","22b66e9a63ba145b","12c1c3a1622fb7cc","1c44 "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f", "ae1b53fc580ce346","c117bd5db3eeabd6","d06552d71b9ca4cd","4cb333a88af66575","2a580187c312c79a","bf8cf93c55c1eadb","6d877e24991465e4", "b1c8e0148a4a755","2c25abb7c776bd54","a1754e08473bd1fa","4dbab14670fa155d","2a3667a499a23b16","930379d323dd95e8","65efe31656e8814f", - "1270cef2e573f846"] + "1270cef2e573f846","aeafb38ca9114f12","2a83e3384b45f2d7","91fbb874b3ce2570","74ad51ca63526e1e","defd8406641d53a5"] IgnoredIdentifiers: - logger diff --git a/utils/rsaencryption/testingspace/vars.go b/utils/rsaencryption/testingspace/vars.go index 27a90cc0de..f94a8da859 100644 --- a/utils/rsaencryption/testingspace/vars.go +++ b/utils/rsaencryption/testingspace/vars.go @@ -2,6 +2,7 @@ package testing var ( // SkPem is a operator private key + // #nosec G101 (Potential hardcoded credentials: RSA private key) SkPem = "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpQIBAAKCAQEAowE7OEbwyLkvrZ0TU4jjooyIFxNvgrY8Fj+WslyZTlyj8UDf\nFrYh5Un2u4YMdAe+cPf1XK+A/P9XX7OB4nf1OoGVB6wrC/jhLbvOH650ryUYopeY\nhlSXxGnD4vcvTvcqLLB+ue2/iySxQLpZR/6VsT3fFrEonzFTqnFCwCF28iPnJVBj\nX6T/HcTJ55IDkbtotarU6cwwNOHnHkzWrv7ityPkR4Ge11hmVG9QjROt56ehXfFs\nFo5MqSvqpYplXkI/zUNm8j/lqEdU0RXUr41L2hyKY/pVjsgmeTsN7/ZqACkHye9F\nbkV9V/VbTh7hWVLTqGSh7BY/D7gwOwfuKiq2TwIDAQABAoIBADjO3Qyn7JKHt44S\nCAI82thzkZo5M8uiJx652pMeom8k6h3SNe18XCPEuzBvbzeg20YTpHdA0vtZIeJA\ndSuwEs7pCj86SWZKvm9p3FQ+QHwpuYQwwP9Py/Svx4z6CIrEqPYaLJAvw2mCyCN+\nzk7A8vpqTa1i4H1ae4YTIuhCwWlxe1ttD6rVUYfC2rVaFJ+b8JlzFRq4bnAR8yme\nrE4iAlfgTOj9zL814qRlYQeeZhMvA8T0qWUohbr1imo5XzIJZayLocvqhZEbk0dj\nq9qKWdIpAATRjWvb+7PkjmlwNjLOhJ1phtCkc/S4j2cvo9gcS7WafxaqCl/ix4Yt\n5KvPJ8ECgYEA0Em4nMMEFXbuSM/l5UCzv3kT6H/TYO7FVh071G7QAFoloxJBZDFV\n7fHsc+uCimlG2Xt3CrGo9tsOnF/ZgDKNmtDvvjxmlPnAb5g4uhXgYNMsKQShpeRW\n/ay8CmWbsRqXZaLoI5br2kCTLwsVz2hpabAzBOr2YV3vMRB5i7COYSMCgYEAyFgL\n3DkKwsTTyVyplenoAZaS/o0mKxZnffRnHNP5QgRfT4pQkuogk+MYAeBuGsc4cTi7\nrTtytUMBABXEKGIJkAbNoASHQMUcO1vvcwhBW7Ay+oxuc0JSlnaXjowS0C0o/4qr\nQ/rpUneir+Vu/N8+6edETRkNj+5unmePEe9NBuUCgYEAgtUr31woHot8FcRxNdW0\nkpstRCe20PZqgjMOt9t7UB1P8uSuqo7K2RHTYuUWNHb4h/ejyNXbumPTA6q5Zmta\nw1pmnWo3TXCrze0iBNFlBazf2kwMdbW+Zs2vuCAm8dIwMylnA6PzNj7FtRETfBqr\nzDVfdsFYTcTBUGJ21qXqaV0CgYEAmuMPEEv9WMTo43VDGsaCeq/Zpvii+I7SphsM\nmMn8m6Bbu1e4oUxmsU7RoanMFeHNbiMpXW1namGJ5XHufDYHJJVN5Zd6pYV+JRoX\njjxkoyke0Hs/bNZqmS7ITwlWBiHT33Rqohzaw8oAObLMUq2ZqyYDtQNYa90vIkH3\n5yq1x00CgYEAs4ztQhGRbeUlqnW6Z6yfRJ6XXYqdMPhxuBxvNn/dxJ10T4W2DUuC\njSdpGXrY+ECYyXUwlXBqbaKx1K5AQD7nmu9J3l0oMkX6tSBj1OE5MabATrsW6wvT\nhkTPJZMyPUYhoBkivPUKyQXswrQV/nUQAsAcLeJShTW4gSs0M6weQAc=\n-----END RSA PRIVATE KEY-----\n" // EncryptedKeyBase64 SkPem in base64 format EncryptedKeyBase64 = "NW/6N5Ubo5T+oiT9My2wXFH5TWT7iQnN8YKUlcoFeg00OzL1S4yKrIPemdr7SM3EbPeHlBtOAM3z+06EmaNlwVdBiexSRJmgnknqwt/Ught4pKZK/WdJAEhMRwjZ3nx1Qi1TYcw7oZBaOdeTdm65QEAnsqOHk1htnUTXqsqYxVF750u8JWq3Mzr3oCN65ydSJRQoSa+lo3DikIDrXSYe1LRY5epMRrOq3cujuykuAVZQWp1vzv4w4V6mffmxaDbPpln/w28FKCxYkxG/WhwGuXR1GK6IWr3xpXPKcG+lzfvlmh4UiK1Lad/YD460oMXOKZT8apn4HL4tl9HOb6RyWQ==" From 0f5a7e359aa71d9715954dd8272e961486f688b9 Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Tue, 24 Oct 2023 13:18:53 +0300 Subject: [PATCH 17/54] Holesky support (#1166) * Squashed commit of the following: commit 315634bfa500070de8d201066f5ec6ced5ebbb02 Merge: 17d2cbd32 37df40691 Author: moshe-blox Date: Tue Oct 24 12:52:53 2023 +0300 Merge branch 'holesky-stage-config' into holesky-stage-configuration commit 17d2cbd32f23dfd7e723362e6af1bacd46547064 Author: moshe-blox Date: Tue Oct 24 12:49:13 2023 +0300 deploy to 5--8 commit cdc6da29699f52f9c08fe860b9b8d60c572d2ed9 Author: moshe-blox Date: Tue Oct 24 12:23:54 2023 +0300 remove unused func commit 37df4069149c47edbc09abb1e555444ea555bf9d Author: Lior Rutenberg Date: Sun Oct 22 19:34:50 2023 +0300 update ekm version to holskey supported pr commit 367ff64b06d778ae3115f199aa59cc932eab269d Author: moshe-blox Date: Sun Oct 22 16:34:51 2023 +0300 removed unused method commit 094f7c4c730edcf91115b204995f51e671dc6f38 Author: moshe-blox Date: Sun Oct 22 16:11:05 2023 +0300 approve spec diffs commit 1eb377f0bb888a5093e11174d2512518a17d1567 Author: moshe-blox Date: Sun Oct 22 16:09:56 2023 +0300 fixes for new linter version commit c17dd609651f579c4cb71fdcbcafdae84ab40d9d Author: moshe-blox Date: Sun Oct 22 15:34:39 2023 +0300 fix more spec tests commit ad965b039247fad29befdae35af8585457b6f813 Author: Lior Rutenberg Date: Sun Oct 22 14:28:58 2023 +0300 updated supported network commit 
c1449e44225c3c2dc1f70e8daaaaeaf4a7d0d2e8 Author: Lior Rutenberg Date: Sun Oct 22 14:23:51 2023 +0300 updated k8 files config commit 3ba3c3d1d0a5a57e5d464efedeeffefa752f02ac Author: moshe-blox Date: Sun Oct 22 14:13:33 2023 +0300 more spec test fixes commit f36ad22374444c548d979d3972305586e5d22690 Merge: 97b04f59d 1fbe548a3 Author: Lior Rutenberg Date: Sun Oct 22 14:01:57 2023 +0300 Merge branch 'update-spec' into holesky-stage-config commit 1fbe548a3781e61919e667426367b9b8b3e5efe1 Author: moshe-blox Date: Sun Oct 22 13:25:26 2023 +0300 undo temporary test commit 80ed66dfd1ad5a0d3beba09896a5ccd439b85e2b Author: moshe-blox Date: Sun Oct 22 13:25:00 2023 +0300 qbft spectest fixes commit ff61279bfd95353e20eee69e8cde83056543bae0 Author: moshe-blox Date: Sun Oct 22 13:03:22 2023 +0300 go mod tidy commit bb3bad1df48ebc7ec922826492d7835583f959dc Author: moshe-blox Date: Sun Oct 22 13:02:17 2023 +0300 update spec to 0.3.3 commit ea4af6bb7ede37ceee24f382c066132a134c8324 Author: moshe-blox Date: Fri Oct 20 17:28:35 2023 +0300 state comparison wip commit 8b8e0fc6f73e6b37e539dfe26de484c8db30cb2f Author: moshe-blox Date: Thu Oct 19 13:18:38 2023 +0300 log skipped tests commit d5aae931e6674dace4c121b4b33287e940d093de Author: moshe-blox Date: Thu Oct 19 13:17:09 2023 +0300 ignore voluntary exit tests commit e308f56e887e9c7e113854d48b5747e8aaa6c21b Author: moshe-blox Date: Thu Oct 19 13:04:09 2023 +0300 more alignments & differ approvals commit bb10c4630285eb38d9110a27944eb06181a6b196 Author: moshe-blox Date: Thu Oct 19 12:52:48 2023 +0300 lint fixes commit edb9f65e5aacb6103671a3571f109cb7dd63702e Author: moshe-blox Date: Thu Oct 19 12:38:42 2023 +0300 refactors: remove syncing methods commit a237d840528d9ff50b63e4e4ecc3edaa18bf32ef Author: moshe-blox Date: Wed Oct 18 17:19:09 2023 +0300 spec alignments commit bc7999317e9600e3a3b7e5d6dd70e6fe586d5eba Author: moshe-blox Date: Wed Oct 18 16:59:13 2023 +0300 generate mocks commit 083f164045112b1cc35d21a6cc59b450b3a340ce Merge: e436f3b3d 090b237a3 Author: moshe-blox Date: Wed Oct 18 16:57:43 2023 +0300 Merge branch 'stage' into update-spec commit 97b04f59d7bb04c7364b9c14cb6eb31186d66003 Author: stoyan.peev Date: Wed Oct 18 16:10:48 2023 +0300 Deploy Holesky to ssv-nodes 1-4 commit f27978d20dbf7ac133e71becc9ead55158d37fa7 Author: stoyan.peev Date: Wed Oct 18 14:23:44 2023 +0300 Added enr for Holesky boot node commit 8a73e8a8949f62578f7871e3ed4da2e659d3d99a Author: Lior Rutenberg Date: Wed Oct 18 11:24:33 2023 +0300 added holesky support commit e436f3b3deec9bb16f45cda17811a3e0d677f74b Author: moshe-blox Date: Wed Sep 20 12:17:16 2023 +0300 update ssv-spec to main branch commit 58ff263035ce86c16930e73707e4d66cf38b0e3f Merge: f2d01ba25 b2a8ec4e7 Author: moshe-blox Date: Wed Sep 20 12:14:19 2023 +0300 Merge branch 'stage' into update-spec commit f2d01ba257d98a0b727f7ae3bfff717ddea2f434 Author: moshe-blox Date: Sun Aug 20 16:27:34 2023 +0300 update spec JSONs commit 26e9929c47f3f11d6c1bef7a5796639dd971b3fe Author: moshe-blox Date: Sun Aug 20 16:02:34 2023 +0300 refactors commit fa3571fdab0ae9237829123e70c53e1338a171bb Author: moshe-blox Date: Sun Aug 20 15:57:26 2023 +0300 update ssv-spec to #drop-blinded-block-rejection * revert gitlab ci * gofmt --------- Co-authored-by: moshe-blox --- .k8/stage/ssv-node-v2-1-deployment.yml | 4 ++-- .k8/stage/ssv-node-v2-2-deployment.yml | 4 ++-- .k8/stage/ssv-node-v2-3-deployment.yml | 4 ++-- .k8/stage/ssv-node-v2-4-deployment.yml | 4 ++-- go.mod | 3 +++ go.sum | 4 ++-- networkconfig/config.go | 1 + networkconfig/holesky-stage.go 
| 22 ++++++++++++++++++++++ 8 files changed, 36 insertions(+), 10 deletions(-) create mode 100644 networkconfig/holesky-stage.go diff --git a/.k8/stage/ssv-node-v2-1-deployment.yml b/.k8/stage/ssv-node-v2-1-deployment.yml index 340d2a3419..a62ac399cb 100644 --- a/.k8/stage/ssv-node-v2-1-deployment.yml +++ b/.k8/stage/ssv-node-v2-1-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-2-deployment.yml b/.k8/stage/ssv-node-v2-2-deployment.yml index ccb63c8cde..bc728de072 100644 --- a/.k8/stage/ssv-node-v2-2-deployment.yml +++ b/.k8/stage/ssv-node-v2-2-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12002" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-3-deployment.yml b/.k8/stage/ssv-node-v2-3-deployment.yml index d30d7648a5..81ca74db36 100644 --- a/.k8/stage/ssv-node-v2-3-deployment.yml +++ b/.k8/stage/ssv-node-v2-3-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12003" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-4-deployment.yml b/.k8/stage/ssv-node-v2-4-deployment.yml index de012b24f7..a1b98d28a1 100644 --- a/.k8/stage/ssv-node-v2-4-deployment.yml +++ b/.k8/stage/ssv-node-v2-4-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12004" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/go.mod b/go.mod index be8456527b..b39d5e0cc9 100644 --- a/go.mod +++ b/go.mod @@ -222,3 +222,6 @@ require ( replace github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 replace github.com/dgraph-io/ristretto => github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f + +//TODO remove this replace when the following PR is merged https://github.com/bloxapp/eth2-key-manager/pull/100 +replace github.com/bloxapp/eth2-key-manager => github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 diff --git a/go.sum b/go.sum index 5a76a37965..cf4040a7be 100644 --- a/go.sum +++ b/go.sum @@ -54,8 +54,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bloxapp/eth2-key-manager v1.3.1 h1:1olQcOHRY2TN1o8JX9AN1siEIJXWnlM+BlknfBbXoo4= -github.com/bloxapp/eth2-key-manager v1.3.1/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= +github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 h1:vjrMmMH15Bo0QF+228CuEZvCI+OuPyJRco82Gj/WyTI= +github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= github.com/bloxapp/ssv-spec v0.3.3 
h1:iNomqWQjxDDQouHMjl27PmH1hUolJ4u8QQ+HX/TQQcg= github.com/bloxapp/ssv-spec v0.3.3/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= diff --git a/networkconfig/config.go b/networkconfig/config.go index 5a43b9fdc8..a4791e878e 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -14,6 +14,7 @@ import ( var SupportedConfigs = map[string]NetworkConfig{ Mainnet.Name: Mainnet, + HoleskyStage.Name: HoleskyStage, JatoV2Stage.Name: JatoV2Stage, JatoV2.Name: JatoV2, LocalTestnet.Name: LocalTestnet, diff --git a/networkconfig/holesky-stage.go b/networkconfig/holesky-stage.go new file mode 100644 index 0000000000..c3e9d1aa8a --- /dev/null +++ b/networkconfig/holesky-stage.go @@ -0,0 +1,22 @@ +package networkconfig + +import ( + "math/big" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" +) + +var HoleskyStage = NetworkConfig{ + Name: "holesky-stage", + Beacon: beacon.NewNetwork(spectypes.HoleskyNetwork), + Domain: [4]byte{0x00, 0x00, 0x31, 0x12}, + GenesisEpoch: 1, + RegistrySyncOffset: new(big.Int).SetInt64(84599), + RegistryContractAddr: "0x0d33801785340072C452b994496B19f196b7eE15", + Bootnodes: []string{ + "enr:-Li4QNUN0RdeoHjI4Np18-PX1VXrJ2rJMo2OarRz0wCAxiYlD3s_E4zsmXi1LHv62ULLBT-AQfZIjYefEoEsMDkaEKCGAYtCguORh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhKfrtCyJc2VjcDI1NmsxoQP2e508AoA0B-KH-IaAd3nVCfI9q16lNztV-oTpcH72tIN0Y3CCE4mDdWRwgg-h", + }, + WhitelistedOperatorKeys: []string{}, +} From 2ff2fdccfba66bc6d491de7fabb7ac806240f6f4 Mon Sep 17 00:00:00 2001 From: rehs0y Date: Tue, 24 Oct 2023 14:33:27 +0300 Subject: [PATCH 18/54] Stage hetzner deployment (#1168) * merge between hetzer and stage deployments * remove andrew network * add only stage to the hetzner --- .gitlab-ci.yml | 43 ++++++ .../scripts/deploy-cluster-1--4.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-13--16.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-17--20.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-21--24.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-25--28.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-29--32.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-33--36.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-37--40.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-41--44.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-45--48.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-49--52.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-5--8.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-53--56.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-57--60.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-61--64.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-65--68.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-69--72.sh | 131 +++++++++++++++++ .../scripts/deploy-cluster-9--12.sh | 131 +++++++++++++++++ .k8/hetzner-stage/ssv-node-1-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-10-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-11-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-12-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-13-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-14-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-15-deployment.yml | 133 ++++++++++++++++++ 
.k8/hetzner-stage/ssv-node-16-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-17-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-18-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-19-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-2-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-20-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-21-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-22-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-23-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-24-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-25-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-26-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-27-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-28-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-29-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-3-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-30-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-31-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-32-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-33-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-34-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-35-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-36-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-37-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-38-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-39-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-4-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-40-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-41-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-42-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-43-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-44-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-45-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-46-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-47-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-48-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-49-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-5-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-50-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-51-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-52-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-53-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-54-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-55-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-56-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-57-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-58-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-59-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-6-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-60-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-61-deployment.yml | 133 
++++++++++++++++++ .k8/hetzner-stage/ssv-node-62-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-63-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-64-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-65-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-66-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-67-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-68-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-69-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-7-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-70-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-71-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-72-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-8-deployment.yml | 133 ++++++++++++++++++ .k8/hetzner-stage/ssv-node-9-deployment.yml | 133 ++++++++++++++++++ 91 files changed, 11977 insertions(+) create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-25--28.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh create mode 100644 .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh create mode 100644 .k8/hetzner-stage/ssv-node-1-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-10-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-11-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-12-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-13-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-14-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-15-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-16-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-17-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-18-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-19-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-2-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-20-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-21-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-22-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-23-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-24-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-25-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-26-deployment.yml create mode 100644 
.k8/hetzner-stage/ssv-node-27-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-28-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-29-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-3-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-30-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-31-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-32-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-33-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-34-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-35-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-36-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-37-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-38-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-39-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-4-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-40-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-41-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-42-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-43-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-44-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-45-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-46-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-47-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-48-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-49-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-5-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-50-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-51-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-52-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-53-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-54-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-55-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-56-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-57-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-58-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-59-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-6-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-60-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-61-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-62-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-63-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-64-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-65-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-66-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-67-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-68-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-69-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-7-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-70-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-71-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-72-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-8-deployment.yml create mode 100644 .k8/hetzner-stage/ssv-node-9-deployment.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index afcc42e934..f0819461bc 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -88,6 +88,49 @@ Deploy exporter to stage: - stage +# 
+---------------------+ +# | STAGE HETZNER NODES | +# +---------------------+ + + +Deploy nodes to hetzner stage: + stage: deploy + tags: + - hetzner-k8s-stage + image: bitnami/kubectl:1.27.5 + script: + - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION + - export SSV_NODES_CPU_LIMIT=$HETZNER_STAGE_SSV_NODES_CPU_LIMIT + - export SSV_NODES_MEM_LIMIT=$HETZNER_STAGE_SSV_NODES_MEM_LIMIT + - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig + - mv kubeconfig ~/.kube/ + - export KUBECONFIG=~/.kube/kubeconfig + - kubectl config get-contexts + # + # +--------------------+ + # | Deploy SSV nodes | + # +--------------------+ + - .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-25--28.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local 
stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + only: + - stage + # +---------------+ # | Prod | # +---------------+ diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh new file mode 100644 index 0000000000..f2a8669b7d --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-1-deployment.yml" + "ssv-node-2-deployment.yml" + "ssv-node-3-deployment.yml" + "ssv-node-4-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh new file mode 100644 index 0000000000..1de999f0e8 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-13-deployment.yml" + "ssv-node-14-deployment.yml" + "ssv-node-15-deployment.yml" + "ssv-node-16-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh new file mode 100644 index 0000000000..812a48e3f6 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-17-deployment.yml" + "ssv-node-18-deployment.yml" + "ssv-node-19-deployment.yml" + "ssv-node-20-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh new file mode 100644 index 0000000000..57c89f2fdd --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-21-deployment.yml" + "ssv-node-22-deployment.yml" + "ssv-node-23-deployment.yml" + "ssv-node-24-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh new file mode 100644 index 0000000000..134e83dad8 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-25-deployment.yml" + "ssv-node-26-deployment.yml" + "ssv-node-27-deployment.yml" + "ssv-node-28-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh new file mode 100644 index 0000000000..6e721e8342 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-29-deployment.yml" + "ssv-node-30-deployment.yml" + "ssv-node-31-deployment.yml" + "ssv-node-32-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh new file mode 100644 index 0000000000..deb2d911e5 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-33-deployment.yml" + "ssv-node-34-deployment.yml" + "ssv-node-35-deployment.yml" + "ssv-node-36-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh new file mode 100644 index 0000000000..c82c77ce42 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-37-deployment.yml" + "ssv-node-38-deployment.yml" + "ssv-node-39-deployment.yml" + "ssv-node-40-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh new file mode 100644 index 0000000000..c4684e685e --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-41-deployment.yml" + "ssv-node-42-deployment.yml" + "ssv-node-43-deployment.yml" + "ssv-node-44-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh new file mode 100644 index 0000000000..11a54c9722 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-45-deployment.yml"
+  "ssv-node-46-deployment.yml"
+  "ssv-node-47-deployment.yml"
+  "ssv-node-48-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh
new file mode 100644
index 0000000000..dcc90d2742
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-49-deployment.yml"
+  "ssv-node-50-deployment.yml"
+  "ssv-node-51-deployment.yml"
+  "ssv-node-52-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh
new file mode 100644
index 0000000000..e3bb9e94a2
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-5-deployment.yml"
+  "ssv-node-6-deployment.yml"
+  "ssv-node-7-deployment.yml"
+  "ssv-node-8-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh
new file mode 100644
index 0000000000..9efd728b17
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-53-deployment.yml"
+  "ssv-node-54-deployment.yml"
+  "ssv-node-55-deployment.yml"
+  "ssv-node-56-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh
new file mode 100644
index 0000000000..1be68e57f5
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-57-deployment.yml"
+  "ssv-node-58-deployment.yml"
+  "ssv-node-59-deployment.yml"
+  "ssv-node-60-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh
new file mode 100644
index 0000000000..2fc32263a0
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-61-deployment.yml"
+  "ssv-node-62-deployment.yml"
+  "ssv-node-63-deployment.yml"
+  "ssv-node-64-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh b/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh
new file mode 100644
index 0000000000..fe57c84c75
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-65-deployment.yml"
+  "ssv-node-66-deployment.yml"
+  "ssv-node-67-deployment.yml"
+  "ssv-node-68-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh
new file mode 100644
index 0000000000..229536c0d4
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-69-deployment.yml"
+  "ssv-node-70-deployment.yml"
+  "ssv-node-71-deployment.yml"
+  "ssv-node-72-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh
new file mode 100644
index 0000000000..81fe2de698
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-9-deployment.yml" + "ssv-node-10-deployment.yml" + "ssv-node-11-deployment.yml" + "ssv-node-12-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/ssv-node-1-deployment.yml b/.k8/hetzner-stage/ssv-node-1-deployment.yml new file mode 100644 index 0000000000..c36df6e259 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-1-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-1-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-1 +spec: + type: ClusterIP + ports: + - port: 12001 + protocol: UDP + targetPort: 12001 + name: port-12001 + - port: 13001 + protocol: TCP + targetPort: 13001 + name: port-13001 + - port: 15001 + protocol: TCP + targetPort: 15001 + name: port-15001 + - port: 16001 + protocol: TCP + targetPort: 16001 + name: port-16001 + selector: + app: ssv-node-1 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-1 + name: ssv-node-1 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-1 + template: + metadata: + labels: + app: ssv-node-1 + spec: + containers: + - name: ssv-node-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12001 + name: port-12001 + hostPort: 12001 + protocol: UDP + - containerPort: 13001 + name: port-13001 + hostPort: 13001 + - containerPort: 15001 + name: port-15001 + hostPort: 15001 + - containerPort: 16001 + name: port-16001 + hostPort: 16001 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15001" + - name: SSV_API_PORT + value: "16001" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-1 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-1-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-1 + persistentVolumeClaim: + claimName: ssv-node-1 + - name: ssv-node-1-cm + configMap: + name: ssv-node-1-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-10-deployment.yml b/.k8/hetzner-stage/ssv-node-10-deployment.yml new file mode 100644 index 0000000000..216e789152 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-10-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-10-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-10 +spec: + type: ClusterIP + ports: + - port: 12010 + protocol: UDP + targetPort: 12010 + name: port-12010 + - port: 13010 + protocol: TCP + targetPort: 13010 + name: port-13010 + - port: 15010 + protocol: TCP + targetPort: 15010 + name: port-15010 + - port: 16010 + protocol: TCP + targetPort: 16010 + name: port-16010 + selector: + app: ssv-node-10 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-10 + name: ssv-node-10 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-10 + template: + metadata: + labels: + app: ssv-node-10 + spec: + containers: + - name: ssv-node-10 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12010 + name: port-12010 + protocol: UDP + hostPort: 12010 + - containerPort: 13010 + name: port-13010 + hostPort: 13010 + - containerPort: 15010 + name: port-15010 + hostPort: 15010 + - containerPort: 16010 + name: port-16010 + hostPort: 16010 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15010" + - name: SSV_API_PORT + value: "16010" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-10 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-10-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-10 + persistentVolumeClaim: + claimName: ssv-node-10 + - name: ssv-node-10-cm + configMap: + name: ssv-node-10-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-11-deployment.yml b/.k8/hetzner-stage/ssv-node-11-deployment.yml new file mode 100644 index 0000000000..c3eb635410 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-11-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-11-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-11 +spec: + type: ClusterIP + ports: + - port: 12011 + protocol: UDP + targetPort: 12011 + name: port-12011 + - port: 13011 + protocol: TCP + targetPort: 13011 + name: port-13011 + - port: 15011 + protocol: TCP + targetPort: 15011 + name: port-15011 + - port: 16011 + protocol: TCP + targetPort: 16011 + name: port-16011 + selector: + app: ssv-node-11 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-11 + name: ssv-node-11 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-11 + template: + metadata: + labels: + app: ssv-node-11 + spec: + containers: + - name: ssv-node-11 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12011 + name: port-12011 + protocol: UDP + hostPort: 12011 + - containerPort: 13011 + name: port-13011 + hostPort: 13011 + - containerPort: 15011 + name: port-15011 + hostPort: 15011 + - containerPort: 16011 + name: port-16011 + hostPort: 16011 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15011" + - name: SSV_API_PORT + value: "16011" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-11 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-11-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-11 + persistentVolumeClaim: + claimName: ssv-node-11 + - name: ssv-node-11-cm + configMap: + name: ssv-node-11-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-12-deployment.yml b/.k8/hetzner-stage/ssv-node-12-deployment.yml new file mode 100644 index 0000000000..81df024991 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-12-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-12-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-12 +spec: + type: ClusterIP + ports: + - port: 12012 + protocol: UDP + targetPort: 12012 + name: port-12012 + - port: 13012 + protocol: TCP + targetPort: 13012 + name: port-13012 + - port: 15012 + protocol: TCP + targetPort: 15012 + name: port-15012 + - port: 16012 + protocol: TCP + targetPort: 16012 + name: port-16012 + selector: + app: ssv-node-12 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-12 + name: ssv-node-12 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-12 + template: + metadata: + labels: + app: ssv-node-12 + spec: + containers: + - name: ssv-node-12 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12012 + name: port-12012 + protocol: UDP + hostPort: 12012 + - containerPort: 13012 + name: port-13012 + hostPort: 13012 + - containerPort: 15012 + name: port-15012 + hostPort: 15012 + - containerPort: 16012 + name: port-16012 + hostPort: 16012 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15012" + - name: SSV_API_PORT + value: "16012" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-12 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-12-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-12 + persistentVolumeClaim: + claimName: ssv-node-12 + - name: ssv-node-12-cm + configMap: + name: ssv-node-12-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-13-deployment.yml b/.k8/hetzner-stage/ssv-node-13-deployment.yml new file mode 100644 index 0000000000..b54177f184 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-13-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-13-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-13 +spec: + type: ClusterIP + ports: + - port: 12013 + protocol: UDP + targetPort: 12013 + name: port-12013 + - port: 13013 + protocol: TCP + targetPort: 13013 + name: port-13013 + - port: 15013 + protocol: TCP + targetPort: 15013 + name: port-15013 + - port: 16013 + protocol: TCP + targetPort: 16013 + name: port-16013 + selector: + app: ssv-node-13 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-13 + name: ssv-node-13 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-13 + template: + metadata: + labels: + app: ssv-node-13 + spec: + containers: + - name: ssv-node-13 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12013 + name: port-12013 + protocol: UDP + hostPort: 12013 + - containerPort: 13013 + name: port-13013 + hostPort: 13013 + - containerPort: 15013 + name: port-15013 + hostPort: 15013 + - containerPort: 16013 + name: port-16013 + hostPort: 16013 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15013" + - name: SSV_API_PORT + value: "16013" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-13 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-13-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-13 + persistentVolumeClaim: + claimName: ssv-node-13 + - name: ssv-node-13-cm + configMap: + name: ssv-node-13-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-14-deployment.yml b/.k8/hetzner-stage/ssv-node-14-deployment.yml new file mode 100644 index 0000000000..ca4aa3e735 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-14-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-14-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-14 +spec: + type: ClusterIP + ports: + - port: 12014 + protocol: UDP + targetPort: 12014 + name: port-12014 + - port: 13014 + protocol: TCP + targetPort: 13014 + name: port-13014 + - port: 15014 + protocol: TCP + targetPort: 15014 + name: port-15014 + - port: 16014 + protocol: TCP + targetPort: 16014 + name: port-16014 + selector: + app: ssv-node-14 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-14 + name: ssv-node-14 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-14 + template: + metadata: + labels: + app: ssv-node-14 + spec: + containers: + - name: ssv-node-14 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12014 + name: port-12014 + protocol: UDP + hostPort: 12014 + - containerPort: 13014 + name: port-13014 + hostPort: 13014 + - containerPort: 15014 + name: port-15014 + hostPort: 15014 + - containerPort: 16014 + name: port-16014 + hostPort: 16014 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15014" + - name: SSV_API_PORT + value: "16014" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-14 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-14-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-14 + persistentVolumeClaim: + claimName: ssv-node-14 + - name: ssv-node-14-cm + configMap: + name: ssv-node-14-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-15-deployment.yml b/.k8/hetzner-stage/ssv-node-15-deployment.yml new file mode 100644 index 0000000000..00a87f3fde --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-15-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-15-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-15 +spec: + type: ClusterIP + ports: + - port: 12015 + protocol: UDP + targetPort: 12015 + name: port-12015 + - port: 13015 + protocol: TCP + targetPort: 13015 + name: port-13015 + - port: 15015 + protocol: TCP + targetPort: 15015 + name: port-15015 + - port: 16015 + protocol: TCP + targetPort: 16015 + name: port-16015 + selector: + app: ssv-node-15 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-15 + name: ssv-node-15 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-15 + template: + metadata: + labels: + app: ssv-node-15 + spec: + containers: + - name: ssv-node-15 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12015 + name: port-12015 + protocol: UDP + hostPort: 12015 + - containerPort: 13015 + name: port-13015 + hostPort: 13015 + - containerPort: 15015 + name: port-15015 + hostPort: 15015 + - containerPort: 16015 + name: port-16015 + hostPort: 16015 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15015" + - name: SSV_API_PORT + value: "16015" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-15 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-15-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-15 + persistentVolumeClaim: + claimName: ssv-node-15 + - name: ssv-node-15-cm + configMap: + name: ssv-node-15-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-16-deployment.yml b/.k8/hetzner-stage/ssv-node-16-deployment.yml new file mode 100644 index 0000000000..dfd2fd4645 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-16-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-16-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-16 +spec: + type: ClusterIP + ports: + - port: 12016 + protocol: UDP + targetPort: 12016 + name: port-12016 + - port: 13016 + protocol: TCP + targetPort: 13016 + name: port-13016 + - port: 15016 + protocol: TCP + targetPort: 15016 + name: port-15016 + - port: 16016 + protocol: TCP + targetPort: 16016 + name: port-16016 + selector: + app: ssv-node-16 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-16 + name: ssv-node-16 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-16 + template: + metadata: + labels: + app: ssv-node-16 + spec: + containers: + - name: ssv-node-16 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12016 + name: port-12016 + protocol: UDP + hostPort: 12016 + - containerPort: 13016 + name: port-13016 + hostPort: 13016 + - containerPort: 15016 + name: port-15016 + hostPort: 15016 + - containerPort: 16016 + name: port-16016 + hostPort: 16016 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15016" + - name: SSV_API_PORT + value: "16016" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-16 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-16-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-16 + persistentVolumeClaim: + claimName: ssv-node-16 + - name: ssv-node-16-cm + configMap: + name: ssv-node-16-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-17-deployment.yml b/.k8/hetzner-stage/ssv-node-17-deployment.yml new file mode 100644 index 0000000000..ca58dcc94e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-17-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-17-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-17 +spec: + type: ClusterIP + ports: + - port: 12017 + protocol: UDP + targetPort: 12017 + name: port-12017 + - port: 13017 + protocol: TCP + targetPort: 13017 + name: port-13017 + - port: 15017 + protocol: TCP + targetPort: 15017 + name: port-15017 + - port: 16017 + protocol: TCP + targetPort: 16017 + name: port-16017 + selector: + app: ssv-node-17 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-17 + name: ssv-node-17 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-17 + template: + metadata: + labels: + app: ssv-node-17 + spec: + containers: + - name: ssv-node-17 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12017 + name: port-12017 + protocol: UDP + hostPort: 12017 + - containerPort: 13017 + name: port-13017 + hostPort: 13017 + - containerPort: 15017 + name: port-15017 + hostPort: 15017 + - containerPort: 16017 + name: port-16017 + hostPort: 16017 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15017" + - name: SSV_API_PORT + value: "16017" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-17 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-17-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-17 + persistentVolumeClaim: + claimName: ssv-node-17 + - name: ssv-node-17-cm + configMap: + name: ssv-node-17-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-18-deployment.yml b/.k8/hetzner-stage/ssv-node-18-deployment.yml new file mode 100644 index 0000000000..52dd6c3330 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-18-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-18-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-18 +spec: + type: ClusterIP + ports: + - port: 12018 + protocol: UDP + targetPort: 12018 + name: port-12018 + - port: 13018 + protocol: TCP + targetPort: 13018 + name: port-13018 + - port: 15018 + protocol: TCP + targetPort: 15018 + name: port-15018 + - port: 16018 + protocol: TCP + targetPort: 16018 + name: port-16018 + selector: + app: ssv-node-18 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-18 + name: ssv-node-18 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-18 + template: + metadata: + labels: + app: ssv-node-18 + spec: + containers: + - name: ssv-node-18 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12018 + name: port-12018 + protocol: UDP + hostPort: 12018 + - containerPort: 13018 + name: port-13018 + hostPort: 13018 + - containerPort: 15018 + name: port-15018 + hostPort: 15018 + - containerPort: 16018 + name: port-16018 + hostPort: 16018 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15018" + - name: SSV_API_PORT + value: "16018" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-18 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-18-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-18 + persistentVolumeClaim: + claimName: ssv-node-18 + - name: ssv-node-18-cm + configMap: + name: ssv-node-18-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-19-deployment.yml b/.k8/hetzner-stage/ssv-node-19-deployment.yml new file mode 100644 index 0000000000..f60ef88662 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-19-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-19-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-19 +spec: + type: ClusterIP + ports: + - port: 12019 + protocol: UDP + targetPort: 12019 + name: port-12019 + - port: 13019 + protocol: TCP + targetPort: 13019 + name: port-13019 + - port: 15019 + protocol: TCP + targetPort: 15019 + name: port-15019 + - port: 16019 + protocol: TCP + targetPort: 16019 + name: port-16019 + selector: + app: ssv-node-19 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-19 + name: ssv-node-19 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-19 + template: + metadata: + labels: + app: ssv-node-19 + spec: + containers: + - name: ssv-node-19 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12019 + name: port-12019 + protocol: UDP + hostPort: 12019 + - containerPort: 13019 + name: port-13019 + hostPort: 13019 + - containerPort: 15019 + name: port-15019 + hostPort: 15019 + - containerPort: 16019 + name: port-16019 + hostPort: 16019 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15019" + - name: SSV_API_PORT + value: "16019" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-19 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-19-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-19 + persistentVolumeClaim: + claimName: ssv-node-19 + - name: ssv-node-19-cm + configMap: + name: ssv-node-19-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-2-deployment.yml b/.k8/hetzner-stage/ssv-node-2-deployment.yml new file mode 100644 index 0000000000..1d66d1d863 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-2-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-2-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-2 +spec: + type: ClusterIP + ports: + - port: 12002 + protocol: UDP + targetPort: 12002 + name: port-12002 + - port: 13002 + protocol: TCP + targetPort: 13002 + name: port-13002 + - port: 15002 + protocol: TCP + targetPort: 15002 + name: port-15002 + - port: 16002 + protocol: TCP + targetPort: 16002 + name: port-16002 + selector: + app: ssv-node-2 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-2 + name: ssv-node-2 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-2 + template: + metadata: + labels: + app: ssv-node-2 + spec: + containers: + - name: ssv-node-2 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12002 + name: port-12002 + protocol: UDP + hostPort: 12002 + - containerPort: 13002 + name: port-13002 + hostPort: 13002 + - containerPort: 15002 + name: port-15002 + hostPort: 15002 + - containerPort: 16002 + name: port-16002 + hostPort: 16002 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15002" + - name: SSV_API_PORT + value: "16002" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-2 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-2-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-2 + persistentVolumeClaim: + claimName: ssv-node-2 + - name: ssv-node-2-cm + configMap: + name: ssv-node-2-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-20-deployment.yml b/.k8/hetzner-stage/ssv-node-20-deployment.yml new file mode 100644 index 0000000000..97eb182b6c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-20-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-20-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-20 +spec: + type: ClusterIP + ports: + - port: 12020 + protocol: UDP + targetPort: 12020 + name: port-12020 + - port: 13020 + protocol: TCP + targetPort: 13020 + name: port-13020 + - port: 15020 + protocol: TCP + targetPort: 15020 + name: port-15020 + - port: 16020 + protocol: TCP + targetPort: 16020 + name: port-16020 + selector: + app: ssv-node-20 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-20 + name: ssv-node-20 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-20 + template: + metadata: + labels: + app: ssv-node-20 + spec: + containers: + - name: ssv-node-20 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12020 + name: port-12020 + protocol: UDP + hostPort: 12020 + - containerPort: 13020 + name: port-13020 + hostPort: 13020 + - containerPort: 15020 + name: port-15020 + hostPort: 15020 + - containerPort: 16020 + name: port-16020 + hostPort: 16020 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15020" + - name: SSV_API_PORT + value: "16020" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-20 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-20-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-20 + persistentVolumeClaim: + claimName: ssv-node-20 + - name: ssv-node-20-cm + configMap: + name: ssv-node-20-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-21-deployment.yml b/.k8/hetzner-stage/ssv-node-21-deployment.yml new file mode 100644 index 0000000000..f372232436 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-21-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-21-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-21 +spec: + type: ClusterIP + ports: + - port: 12021 + protocol: UDP + targetPort: 12021 + name: port-12021 + - port: 13021 + protocol: TCP + targetPort: 13021 + name: port-13021 + - port: 15021 + protocol: TCP + targetPort: 15021 + name: port-15021 + - port: 16021 + protocol: TCP + targetPort: 16021 + name: port-16021 + selector: + app: ssv-node-21 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-21 + name: ssv-node-21 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-21 + template: + metadata: + labels: + app: ssv-node-21 + spec: + containers: + - name: ssv-node-21 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12021 + name: port-12021 + protocol: UDP + hostPort: 12021 + - containerPort: 13021 + name: port-13021 + hostPort: 13021 + - containerPort: 15021 + name: port-15021 + hostPort: 15021 + - containerPort: 16021 + name: port-16021 + hostPort: 16021 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15021" + - name: SSV_API_PORT + value: "16021" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-21 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-21-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-21 + persistentVolumeClaim: + claimName: ssv-node-21 + - name: ssv-node-21-cm + configMap: + name: ssv-node-21-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-22-deployment.yml b/.k8/hetzner-stage/ssv-node-22-deployment.yml new file mode 100644 index 0000000000..06f8ed6abe --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-22-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-22-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-22 +spec: + type: ClusterIP + ports: + - port: 12022 + protocol: UDP + targetPort: 12022 + name: port-12022 + - port: 13022 + protocol: TCP + targetPort: 13022 + name: port-13022 + - port: 15022 + protocol: TCP + targetPort: 15022 + name: port-15022 + - port: 16022 + protocol: TCP + targetPort: 16022 + name: port-16022 + selector: + app: ssv-node-22 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-22 + name: ssv-node-22 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-22 + template: + metadata: + labels: + app: ssv-node-22 + spec: + containers: + - name: ssv-node-22 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12022 + name: port-12022 + protocol: UDP + hostPort: 12022 + - containerPort: 13022 + name: port-13022 + hostPort: 13022 + - containerPort: 15022 + name: port-15022 + hostPort: 15022 + - containerPort: 16022 + name: port-16022 + hostPort: 16022 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15022" + - name: SSV_API_PORT + value: "16022" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-22 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-22-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-22 + persistentVolumeClaim: + claimName: ssv-node-22 + - name: ssv-node-22-cm + configMap: + name: ssv-node-22-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-23-deployment.yml b/.k8/hetzner-stage/ssv-node-23-deployment.yml new file mode 100644 index 0000000000..b9bee42755 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-23-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-23-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-23 +spec: + type: ClusterIP + ports: + - port: 12023 + protocol: UDP + targetPort: 12023 + name: port-12023 + - port: 13023 + protocol: TCP + targetPort: 13023 + name: port-13023 + - port: 15023 + protocol: TCP + targetPort: 15023 + name: port-15023 + - port: 16023 + protocol: TCP + targetPort: 16023 + name: port-16023 + selector: + app: ssv-node-23 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-23 + name: ssv-node-23 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-23 + template: + metadata: + labels: + app: ssv-node-23 + spec: + containers: + - name: ssv-node-23 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12023 + name: port-12023 + protocol: UDP + hostPort: 12023 + - containerPort: 13023 + name: port-13023 + hostPort: 13023 + - containerPort: 15023 + name: port-15023 + hostPort: 15023 + - containerPort: 16023 + name: port-16023 + hostPort: 16023 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY
+ value: "discv5"
+ - name: CONSENSUS_TYPE
+ value: "validation"
+ - name: HOST_DNS
+ value: ""
+ - name: HOST_ADDRESS
+ value: ""
+ - name: DB_PATH
+ value: "./data/db-jato-v2"
+ - name: NETWORK
+ value: "jato-v2-stage"
+ - name: DB_REPORTING
+ value: "false"
+ - name: METRICS_API_PORT
+ value: "15023"
+ - name: SSV_API_PORT
+ value: "16023"
+ - name: ENABLE_PROFILE
+ value: "true"
+ - name: DISCOVERY_TRACE
+ value: 'false'
+ - name: PUBSUB_TRACE
+ value: 'false'
+ - name: BUILDER_PROPOSALS
+ value: "true"
+ volumeMounts:
+ - mountPath: /data
+ name: ssv-node-23
+ - mountPath: /data/share.yaml
+ subPath: share.yaml
+ name: ssv-node-23-cm
+ imagePullSecrets:
+ - name: ecr-repo
+ volumes:
+ - name: ssv-node-23
+ persistentVolumeClaim:
+ claimName: ssv-node-23
+ - name: ssv-node-23-cm
+ configMap:
+ name: ssv-node-23-cm
+ hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-24-deployment.yml b/.k8/hetzner-stage/ssv-node-24-deployment.yml
new file mode 100644
index 0000000000..b2afaef49b
--- /dev/null
+++ b/.k8/hetzner-stage/ssv-node-24-deployment.yml
@@ -0,0 +1,133 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: ssv-node-24-svc
+ namespace: REPLACE_NAMESPACE
+ labels:
+ app: ssv-node-24
+spec:
+ type: ClusterIP
+ ports:
+ - port: 12024
+ protocol: UDP
+ targetPort: 12024
+ name: port-12024
+ - port: 13024
+ protocol: TCP
+ targetPort: 13024
+ name: port-13024
+ - port: 15024
+ protocol: TCP
+ targetPort: 15024
+ name: port-15024
+ - port: 16024
+ protocol: TCP
+ targetPort: 16024
+ name: port-16024
+ selector:
+ app: ssv-node-24
+---
+apiVersion: REPLACE_API_VERSION
+kind: Deployment
+metadata:
+ labels:
+ app: ssv-node-24
+ name: ssv-node-24
+ namespace: REPLACE_NAMESPACE
+spec:
+ replicas: 1
+ strategy:
+ type: Recreate
+ selector:
+ matchLabels:
+ app: ssv-node-24
+ template:
+ metadata:
+ labels:
+ app: ssv-node-24
+ spec:
+ containers:
+ - name: ssv-node-24
+ image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+ #image: mosheblox/ssv-preview:stage
+ imagePullPolicy: Always
+ resources:
+ limits:
+ cpu: REPLACE_NODES_CPU_LIMIT
+ memory: REPLACE_NODES_MEM_LIMIT
+ command: ["make", "start-node"]
+ ports:
+ - containerPort: 12024
+ name: port-12024
+ protocol: UDP
+ hostPort: 12024
+ - containerPort: 13024
+ name: port-13024
+ hostPort: 13024
+ - containerPort: 15024
+ name: port-15024
+ hostPort: 15024
+ - containerPort: 16024
+ name: port-16024
+ hostPort: 16024
+ env:
+ - name: SHARE_CONFIG
+ value: "./data/share.yaml"
+ - name: CONFIG_PATH
+ valueFrom:
+ secretKeyRef:
+ name: config-secrets
+ key: config_path
+ - name: ABI_VERSION
+ valueFrom:
+ secretKeyRef:
+ name: config-secrets
+ key: abi_version
+ optional: true
+ - name: LOG_LEVEL
+ value: "debug"
+ - name: DEBUG_SERVICES
+ value: "ssv/*."
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15024" + - name: SSV_API_PORT + value: "16024" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-24 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-24-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-24 + persistentVolumeClaim: + claimName: ssv-node-24 + - name: ssv-node-24-cm + configMap: + name: ssv-node-24-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-25-deployment.yml b/.k8/hetzner-stage/ssv-node-25-deployment.yml new file mode 100644 index 0000000000..8b8f836456 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-25-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-25-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-25 +spec: + type: ClusterIP + ports: + - port: 12025 + protocol: UDP + targetPort: 12025 + name: port-12025 + - port: 13025 + protocol: TCP + targetPort: 13025 + name: port-13025 + - port: 15025 + protocol: TCP + targetPort: 15025 + name: port-15025 + - port: 16025 + protocol: TCP + targetPort: 16025 + name: port-16025 + selector: + app: ssv-node-25 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-25 + name: ssv-node-25 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-25 + template: + metadata: + labels: + app: ssv-node-25 + spec: + containers: + - name: ssv-node-25 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12025 + name: port-12025 + protocol: UDP + hostPort: 12025 + - containerPort: 13025 + name: port-13025 + hostPort: 13025 + - containerPort: 15025 + name: port-15025 + hostPort: 15025 + - containerPort: 16025 + name: port-16025 + hostPort: 16025 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15025" + - name: SSV_API_PORT + value: "16025" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-25 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-25-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-25 + persistentVolumeClaim: + claimName: ssv-node-25 + - name: ssv-node-25-cm + configMap: + name: ssv-node-25-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-26-deployment.yml b/.k8/hetzner-stage/ssv-node-26-deployment.yml new file mode 100644 index 0000000000..80db29e49c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-26-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-26-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-26 +spec: + type: ClusterIP + ports: + - port: 12026 + protocol: UDP + targetPort: 12026 + name: port-12026 + - port: 13026 + protocol: TCP + targetPort: 13026 + name: port-13026 + - port: 15026 + protocol: TCP + targetPort: 15026 + name: port-15026 + - port: 16026 + protocol: TCP + targetPort: 16026 + name: port-16026 + selector: + app: ssv-node-26 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-26 + name: ssv-node-26 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-26 + template: + metadata: + labels: + app: ssv-node-26 + spec: + containers: + - name: ssv-node-26 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12026 + name: port-12026 + protocol: UDP + hostPort: 12026 + - containerPort: 13026 + name: port-13026 + hostPort: 13026 + - containerPort: 15026 + name: port-15026 + hostPort: 15026 + - containerPort: 16026 + name: port-16026 + hostPort: 16026 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15026" + - name: SSV_API_PORT + value: "16026" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-26 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-26-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-26 + persistentVolumeClaim: + claimName: ssv-node-26 + - name: ssv-node-26-cm + configMap: + name: ssv-node-26-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-27-deployment.yml b/.k8/hetzner-stage/ssv-node-27-deployment.yml new file mode 100644 index 0000000000..6353fcd60f --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-27-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-27-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-27 +spec: + type: ClusterIP + ports: + - port: 12027 + protocol: UDP + targetPort: 12027 + name: port-12027 + - port: 13027 + protocol: TCP + targetPort: 13027 + name: port-13027 + - port: 15027 + protocol: TCP + targetPort: 15027 + name: port-15027 + - port: 16027 + protocol: TCP + targetPort: 16027 + name: port-16027 + selector: + app: ssv-node-27 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-27 + name: ssv-node-27 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-27 + template: + metadata: + labels: + app: ssv-node-27 + spec: + containers: + - name: ssv-node-27 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12027 + name: port-12027 + protocol: UDP + hostPort: 12027 + - containerPort: 13027 + name: port-13027 + hostPort: 13027 + - containerPort: 15027 + name: port-15027 + hostPort: 15027 + - containerPort: 16027 + name: port-16027 + hostPort: 16027 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15027" + - name: SSV_API_PORT + value: "16027" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-27 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-27-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-27 + persistentVolumeClaim: + claimName: ssv-node-27 + - name: ssv-node-27-cm + configMap: + name: ssv-node-27-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-28-deployment.yml b/.k8/hetzner-stage/ssv-node-28-deployment.yml new file mode 100644 index 0000000000..da3457f71c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-28-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-28-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-28 +spec: + type: ClusterIP + ports: + - port: 12028 + protocol: UDP + targetPort: 12028 + name: port-12028 + - port: 13028 + protocol: TCP + targetPort: 13028 + name: port-13028 + - port: 15028 + protocol: TCP + targetPort: 15028 + name: port-15028 + - port: 16028 + protocol: TCP + targetPort: 16028 + name: port-16028 + selector: + app: ssv-node-28 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-28 + name: ssv-node-28 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-28 + template: + metadata: + labels: + app: ssv-node-28 + spec: + containers: + - name: ssv-node-28 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12028 + name: port-12028 + protocol: UDP + hostPort: 12028 + - containerPort: 13028 + name: port-13028 + hostPort: 13028 + - containerPort: 15028 + name: port-15028 + hostPort: 15028 + - containerPort: 16028 + name: port-16028 + hostPort: 16028 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15028" + - name: SSV_API_PORT + value: "16028" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-28 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-28-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-28 + persistentVolumeClaim: + claimName: ssv-node-28 + - name: ssv-node-28-cm + configMap: + name: ssv-node-28-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-29-deployment.yml b/.k8/hetzner-stage/ssv-node-29-deployment.yml new file mode 100644 index 0000000000..a225fc9d1e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-29-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-29-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-29 +spec: + type: ClusterIP + ports: + - port: 12029 + protocol: UDP + targetPort: 12029 + name: port-12029 + - port: 13029 + protocol: TCP + targetPort: 13029 + name: port-13029 + - port: 15029 + protocol: TCP + targetPort: 15029 + name: port-15029 + - port: 16029 + protocol: TCP + targetPort: 16029 + name: port-16029 + selector: + app: ssv-node-29 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-29 + name: ssv-node-29 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-29 + template: + metadata: + labels: + app: ssv-node-29 + spec: + containers: + - name: ssv-node-29 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12029 + name: port-12029 + protocol: UDP + hostPort: 12029 + - containerPort: 13029 + name: port-13029 + hostPort: 13029 + - containerPort: 15029 + name: port-15029 + hostPort: 15029 + - containerPort: 16029 + name: port-16029 + hostPort: 16029 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15029" + - name: SSV_API_PORT + value: "16029" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-29 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-29-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-29 + persistentVolumeClaim: + claimName: ssv-node-29 + - name: ssv-node-29-cm + configMap: + name: ssv-node-29-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-3-deployment.yml b/.k8/hetzner-stage/ssv-node-3-deployment.yml new file mode 100644 index 0000000000..5727ef1a48 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-3-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-3-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-3 +spec: + type: ClusterIP + ports: + - port: 12003 + protocol: UDP + targetPort: 12003 + name: port-12003 + - port: 13003 + protocol: TCP + targetPort: 13003 + name: port-13003 + - port: 15003 + protocol: TCP + targetPort: 15003 + name: port-15003 + - port: 16003 + protocol: TCP + targetPort: 16003 + name: port-16003 + selector: + app: ssv-node-3 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-3 + name: ssv-node-3 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-3 + template: + metadata: + labels: + app: ssv-node-3 + spec: + containers: + - name: ssv-node-3 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12003 + name: port-12003 + protocol: UDP + hostPort: 12003 + - containerPort: 13003 + name: port-13003 + hostPort: 13003 + - containerPort: 15003 + name: port-15003 + hostPort: 15003 + - containerPort: 16003 + name: port-16003 + hostPort: 16003 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15003" + - name: SSV_API_PORT + value: "16003" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-3 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-3-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-3 + persistentVolumeClaim: + claimName: ssv-node-3 + - name: ssv-node-3-cm + configMap: + name: ssv-node-3-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-30-deployment.yml b/.k8/hetzner-stage/ssv-node-30-deployment.yml new file mode 100644 index 0000000000..82f425d7c3 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-30-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-30-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-30 +spec: + type: ClusterIP + ports: + - port: 12030 + protocol: UDP + targetPort: 12030 + name: port-12030 + - port: 13030 + protocol: TCP + targetPort: 13030 + name: port-13030 + - port: 15030 + protocol: TCP + targetPort: 15030 + name: port-15030 + - port: 16030 + protocol: TCP + targetPort: 16030 + name: port-16030 + selector: + app: ssv-node-30 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-30 + name: ssv-node-30 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-30 + template: + metadata: + labels: + app: ssv-node-30 + spec: + containers: + - name: ssv-node-30 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12030 + name: port-12030 + protocol: UDP + hostPort: 12030 + - containerPort: 13030 + name: port-13030 + hostPort: 13030 + - containerPort: 15030 + name: port-15030 + hostPort: 15030 + - containerPort: 16030 + name: port-16030 + hostPort: 16030 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15030" + - name: SSV_API_PORT + value: "16030" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-30 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-30-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-30 + persistentVolumeClaim: + claimName: ssv-node-30 + - name: ssv-node-30-cm + configMap: + name: ssv-node-30-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-31-deployment.yml b/.k8/hetzner-stage/ssv-node-31-deployment.yml new file mode 100644 index 0000000000..0daf1767e1 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-31-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-31-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-31 +spec: + type: ClusterIP + ports: + - port: 12031 + protocol: UDP + targetPort: 12031 + name: port-12031 + - port: 13031 + protocol: TCP + targetPort: 13031 + name: port-13031 + - port: 15031 + protocol: TCP + targetPort: 15031 + name: port-15031 + - port: 16031 + protocol: TCP + targetPort: 16031 + name: port-16031 + selector: + app: ssv-node-31 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-31 + name: ssv-node-31 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-31 + template: + metadata: + labels: + app: ssv-node-31 + spec: + containers: + - name: ssv-node-31 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12031 + name: port-12031 + protocol: UDP + hostPort: 12031 + - containerPort: 13031 + name: port-13031 + hostPort: 13031 + - containerPort: 15031 + name: port-15031 + hostPort: 15031 + - containerPort: 16031 + name: port-16031 + hostPort: 16031 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15031" + - name: SSV_API_PORT + value: "16031" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-31 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-31-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-31 + persistentVolumeClaim: + claimName: ssv-node-31 + - name: ssv-node-31-cm + configMap: + name: ssv-node-31-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-32-deployment.yml b/.k8/hetzner-stage/ssv-node-32-deployment.yml new file mode 100644 index 0000000000..0bbba3fe2a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-32-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-32-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-32 +spec: + type: ClusterIP + ports: + - port: 12032 + protocol: UDP + targetPort: 12032 + name: port-12032 + - port: 13032 + protocol: TCP + targetPort: 13032 + name: port-13032 + - port: 15032 + protocol: TCP + targetPort: 15032 + name: port-15032 + - port: 16032 + protocol: TCP + targetPort: 16032 + name: port-16032 + selector: + app: ssv-node-32 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-32 + name: ssv-node-32 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-32 + template: + metadata: + labels: + app: ssv-node-32 + spec: + containers: + - name: ssv-node-32 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12032 + name: port-12032 + protocol: UDP + hostPort: 12032 + - containerPort: 13032 + name: port-13032 + hostPort: 13032 + - containerPort: 15032 + name: port-15032 + hostPort: 15032 + - containerPort: 16032 + name: port-16032 + hostPort: 16032 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15032" + - name: SSV_API_PORT + value: "16032" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-32 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-32-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-32 + persistentVolumeClaim: + claimName: ssv-node-32 + - name: ssv-node-32-cm + configMap: + name: ssv-node-32-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-33-deployment.yml b/.k8/hetzner-stage/ssv-node-33-deployment.yml new file mode 100644 index 0000000000..2bc8160b1a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-33-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-33-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-33 +spec: + type: ClusterIP + ports: + - port: 12033 + protocol: UDP + targetPort: 12033 + name: port-12033 + - port: 13033 + protocol: TCP + targetPort: 13033 + name: port-13033 + - port: 15033 + protocol: TCP + targetPort: 15033 + name: port-15033 + - port: 16033 + protocol: TCP + targetPort: 16033 + name: port-16033 + selector: + app: ssv-node-33 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-33 + name: ssv-node-33 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-33 + template: + metadata: + labels: + app: ssv-node-33 + spec: + containers: + - name: ssv-node-33 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12033 + name: port-12033 + protocol: UDP + hostPort: 12033 + - containerPort: 13033 + name: port-13033 + hostPort: 13033 + - containerPort: 15033 + name: port-15033 + hostPort: 15033 + - containerPort: 16033 + name: port-16033 + hostPort: 16033 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15033" + - name: SSV_API_PORT + value: "16033" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-33 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-33-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-33 + persistentVolumeClaim: + claimName: ssv-node-33 + - name: ssv-node-33-cm + configMap: + name: ssv-node-33-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-34-deployment.yml b/.k8/hetzner-stage/ssv-node-34-deployment.yml new file mode 100644 index 0000000000..f9bde91d1e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-34-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-34-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-34 +spec: + type: ClusterIP + ports: + - port: 12034 + protocol: UDP + targetPort: 12034 + name: port-12034 + - port: 13034 + protocol: TCP + targetPort: 13034 + name: port-13034 + - port: 15034 + protocol: TCP + targetPort: 15034 + name: port-15034 + - port: 16034 + protocol: TCP + targetPort: 16034 + name: port-16034 + selector: + app: ssv-node-34 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-34 + name: ssv-node-34 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-34 + template: + metadata: + labels: + app: ssv-node-34 + spec: + containers: + - name: ssv-node-34 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12034 + name: port-12034 + protocol: UDP + hostPort: 12034 + - containerPort: 13034 + name: port-13034 + hostPort: 13034 + - containerPort: 15034 + name: port-15034 + hostPort: 15034 + - containerPort: 16034 + name: port-16034 + hostPort: 16034 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15034" + - name: SSV_API_PORT + value: "16034" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-34 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-34-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-34 + persistentVolumeClaim: + claimName: ssv-node-34 + - name: ssv-node-34-cm + configMap: + name: ssv-node-34-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-35-deployment.yml b/.k8/hetzner-stage/ssv-node-35-deployment.yml new file mode 100644 index 0000000000..37a070db61 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-35-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-35-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-35 +spec: + type: ClusterIP + ports: + - port: 12035 + protocol: UDP + targetPort: 12035 + name: port-12035 + - port: 13035 + protocol: TCP + targetPort: 13035 + name: port-13035 + - port: 15035 + protocol: TCP + targetPort: 15035 + name: port-15035 + - port: 16035 + protocol: TCP + targetPort: 16035 + name: port-16035 + selector: + app: ssv-node-35 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-35 + name: ssv-node-35 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-35 + template: + metadata: + labels: + app: ssv-node-35 + spec: + containers: + - name: ssv-node-35 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12035 + name: port-12035 + protocol: UDP + hostPort: 12035 + - containerPort: 13035 + name: port-13035 + hostPort: 13035 + - containerPort: 15035 + name: port-15035 + hostPort: 15035 + - containerPort: 16035 + name: port-16035 + hostPort: 16035 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15035" + - name: SSV_API_PORT + value: "16035" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-35 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-35-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-35 + persistentVolumeClaim: + claimName: ssv-node-35 + - name: ssv-node-35-cm + configMap: + name: ssv-node-35-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-36-deployment.yml b/.k8/hetzner-stage/ssv-node-36-deployment.yml new file mode 100644 index 0000000000..323b0bbf78 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-36-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-36-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-36 +spec: + type: ClusterIP + ports: + - port: 12036 + protocol: UDP + targetPort: 12036 + name: port-12036 + - port: 13036 + protocol: TCP + targetPort: 13036 + name: port-13036 + - port: 15036 + protocol: TCP + targetPort: 15036 + name: port-15036 + - port: 16036 + protocol: TCP + targetPort: 16036 + name: port-16036 + selector: + app: ssv-node-36 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-36 + name: ssv-node-36 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-36 + template: + metadata: + labels: + app: ssv-node-36 + spec: + containers: + - name: ssv-node-36 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12036 + name: port-12036 + protocol: UDP + hostPort: 12036 + - containerPort: 13036 + name: port-13036 + hostPort: 13036 + - containerPort: 15036 + name: port-15036 + hostPort: 15036 + - containerPort: 16036 + name: port-16036 + hostPort: 16036 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15036" + - name: SSV_API_PORT + value: "16036" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-36 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-36-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-36 + persistentVolumeClaim: + claimName: ssv-node-36 + - name: ssv-node-36-cm + configMap: + name: ssv-node-36-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-37-deployment.yml b/.k8/hetzner-stage/ssv-node-37-deployment.yml new file mode 100644 index 0000000000..dc10089edb --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-37-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-37-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-37 +spec: + type: ClusterIP + ports: + - port: 12037 + protocol: UDP + targetPort: 12037 + name: port-12037 + - port: 13037 + protocol: TCP + targetPort: 13037 + name: port-13037 + - port: 15037 + protocol: TCP + targetPort: 15037 + name: port-15037 + - port: 16037 + protocol: TCP + targetPort: 16037 + name: port-16037 + selector: + app: ssv-node-37 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-37 + name: ssv-node-37 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-37 + template: + metadata: + labels: + app: ssv-node-37 + spec: + containers: + - name: ssv-node-37 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12037 + name: port-12037 + protocol: UDP + hostPort: 12037 + - containerPort: 13037 + name: port-13037 + hostPort: 13037 + - containerPort: 15037 + name: port-15037 + hostPort: 15037 + - containerPort: 16037 + name: port-16037 + hostPort: 16037 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15037" + - name: SSV_API_PORT + value: "16037" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-37 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-37-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-37 + persistentVolumeClaim: + claimName: ssv-node-37 + - name: ssv-node-37-cm + configMap: + name: ssv-node-37-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-38-deployment.yml b/.k8/hetzner-stage/ssv-node-38-deployment.yml new file mode 100644 index 0000000000..79b47cfc04 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-38-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-38-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-38 +spec: + type: ClusterIP + ports: + - port: 12038 + protocol: UDP + targetPort: 12038 + name: port-12038 + - port: 13038 + protocol: TCP + targetPort: 13038 + name: port-13038 + - port: 15038 + protocol: TCP + targetPort: 15038 + name: port-15038 + - port: 16038 + protocol: TCP + targetPort: 16038 + name: port-16038 + selector: + app: ssv-node-38 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-38 + name: ssv-node-38 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-38 + template: + metadata: + labels: + app: ssv-node-38 + spec: + containers: + - name: ssv-node-38 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12038 + name: port-12038 + protocol: UDP + hostPort: 12038 + - containerPort: 13038 + name: port-13038 + hostPort: 13038 + - containerPort: 15038 + name: port-15038 + hostPort: 15038 + - containerPort: 16038 + name: port-16038 + hostPort: 16038 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15038" + - name: SSV_API_PORT + value: "16038" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-38 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-38-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-38 + persistentVolumeClaim: + claimName: ssv-node-38 + - name: ssv-node-38-cm + configMap: + name: ssv-node-38-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-39-deployment.yml b/.k8/hetzner-stage/ssv-node-39-deployment.yml new file mode 100644 index 0000000000..70fb3f419a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-39-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-39-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-39 +spec: + type: ClusterIP + ports: + - port: 12039 + protocol: UDP + targetPort: 12039 + name: port-12039 + - port: 13039 + protocol: TCP + targetPort: 13039 + name: port-13039 + - port: 15039 + protocol: TCP + targetPort: 15039 + name: port-15039 + - port: 16039 + protocol: TCP + targetPort: 16039 + name: port-16039 + selector: + app: ssv-node-39 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-39 + name: ssv-node-39 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-39 + template: + metadata: + labels: + app: ssv-node-39 + spec: + containers: + - name: ssv-node-39 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12039 + name: port-12039 + protocol: UDP + hostPort: 12039 + - containerPort: 13039 + name: port-13039 + hostPort: 13039 + - containerPort: 15039 + name: port-15039 + hostPort: 15039 + - containerPort: 16039 + name: port-16039 + hostPort: 16039 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15039" + - name: SSV_API_PORT + value: "16039" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-39 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-39-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-39 + persistentVolumeClaim: + claimName: ssv-node-39 + - name: ssv-node-39-cm + configMap: + name: ssv-node-39-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-4-deployment.yml b/.k8/hetzner-stage/ssv-node-4-deployment.yml new file mode 100644 index 0000000000..0e8185a2c8 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-4-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-4-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-4 +spec: + type: ClusterIP + ports: + - port: 12004 + protocol: UDP + targetPort: 12004 + name: port-12004 + - port: 13004 + protocol: TCP + targetPort: 13004 + name: port-13004 + - port: 15004 + protocol: TCP + targetPort: 15004 + name: port-15004 + - port: 16004 + protocol: TCP + targetPort: 16004 + name: port-16004 + selector: + app: ssv-node-4 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-4 + name: ssv-node-4 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-4 + template: + metadata: + labels: + app: ssv-node-4 + spec: + containers: + - name: ssv-node-4 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12004 + name: port-12004 + protocol: UDP + hostPort: 12004 + - containerPort: 13004 + name: port-13004 + hostPort: 13004 + - containerPort: 15004 + name: port-15004 + hostPort: 15004 + - containerPort: 16004 + name: port-16004 + hostPort: 16004 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15004" + - name: SSV_API_PORT + value: "16004" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-4 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-4-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-4 + persistentVolumeClaim: + claimName: ssv-node-4 + - name: ssv-node-4-cm + configMap: + name: ssv-node-4-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-40-deployment.yml b/.k8/hetzner-stage/ssv-node-40-deployment.yml new file mode 100644 index 0000000000..d79178ef6f --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-40-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-40-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-40 +spec: + type: ClusterIP + ports: + - port: 12040 + protocol: UDP + targetPort: 12040 + name: port-12040 + - port: 13040 + protocol: TCP + targetPort: 13040 + name: port-13040 + - port: 15040 + protocol: TCP + targetPort: 15040 + name: port-15040 + - port: 16040 + protocol: TCP + targetPort: 16040 + name: port-16040 + selector: + app: ssv-node-40 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-40 + name: ssv-node-40 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-40 + template: + metadata: + labels: + app: ssv-node-40 + spec: + containers: + - name: ssv-node-40 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12040 + name: port-12040 + protocol: UDP + hostPort: 12040 + - containerPort: 13040 + name: port-13040 + hostPort: 13040 + - containerPort: 15040 + name: port-15040 + hostPort: 15040 + - containerPort: 16040 + name: port-16040 + hostPort: 16040 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15040" + - name: SSV_API_PORT + value: "16040" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-40 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-40-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-40 + persistentVolumeClaim: + claimName: ssv-node-40 + - name: ssv-node-40-cm + configMap: + name: ssv-node-40-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-41-deployment.yml b/.k8/hetzner-stage/ssv-node-41-deployment.yml new file mode 100644 index 0000000000..98b7b4276d --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-41-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-41-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-41 +spec: + type: ClusterIP + ports: + - port: 12041 + protocol: UDP + targetPort: 12041 + name: port-12041 + - port: 13041 + protocol: TCP + targetPort: 13041 + name: port-13041 + - port: 15041 + protocol: TCP + targetPort: 15041 + name: port-15041 + - port: 16041 + protocol: TCP + targetPort: 16041 + name: port-16041 + selector: + app: ssv-node-41 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-41 + name: ssv-node-41 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-41 + template: + metadata: + labels: + app: ssv-node-41 + spec: + containers: + - name: ssv-node-41 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12041 + name: port-12041 + protocol: UDP + hostPort: 12041 + - containerPort: 13041 + name: port-13041 + hostPort: 13041 + - containerPort: 15041 + name: port-15041 + hostPort: 15041 + - containerPort: 16041 + name: port-16041 + hostPort: 16041 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15041" + - name: SSV_API_PORT + value: "16041" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-41 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-41-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-41 + persistentVolumeClaim: + claimName: ssv-node-41 + - name: ssv-node-41-cm + configMap: + name: ssv-node-41-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-42-deployment.yml b/.k8/hetzner-stage/ssv-node-42-deployment.yml new file mode 100644 index 0000000000..45c566db31 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-42-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-42-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-42 +spec: + type: ClusterIP + ports: + - port: 12042 + protocol: UDP + targetPort: 12042 + name: port-12042 + - port: 13042 + protocol: TCP + targetPort: 13042 + name: port-13042 + - port: 15042 + protocol: TCP + targetPort: 15042 + name: port-15042 + - port: 16042 + protocol: TCP + targetPort: 16042 + name: port-16042 + selector: + app: ssv-node-42 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-42 + name: ssv-node-42 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-42 + template: + metadata: + labels: + app: ssv-node-42 + spec: + containers: + - name: ssv-node-42 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12042 + name: port-12042 + protocol: UDP + hostPort: 12042 + - containerPort: 13042 + name: port-13042 + hostPort: 13042 + - containerPort: 15042 + name: port-15042 + hostPort: 15042 + - containerPort: 16042 + name: port-16042 + hostPort: 16042 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15042" + - name: SSV_API_PORT + value: "16042" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-42 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-42-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-42 + persistentVolumeClaim: + claimName: ssv-node-42 + - name: ssv-node-42-cm + configMap: + name: ssv-node-42-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-43-deployment.yml b/.k8/hetzner-stage/ssv-node-43-deployment.yml new file mode 100644 index 0000000000..0866dde623 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-43-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-43-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-43 +spec: + type: ClusterIP + ports: + - port: 12043 + protocol: UDP + targetPort: 12043 + name: port-12043 + - port: 13043 + protocol: TCP + targetPort: 13043 + name: port-13043 + - port: 15043 + protocol: TCP + targetPort: 15043 + name: port-15043 + - port: 16043 + protocol: TCP + targetPort: 16043 + name: port-16043 + selector: + app: ssv-node-43 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-43 + name: ssv-node-43 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-43 + template: + metadata: + labels: + app: ssv-node-43 + spec: + containers: + - name: ssv-node-43 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12043 + name: port-12043 + protocol: UDP + hostPort: 12043 + - containerPort: 13043 + name: port-13043 + hostPort: 13043 + - containerPort: 15043 + name: port-15043 + hostPort: 15043 + - containerPort: 16043 + name: port-16043 + hostPort: 16043 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15043" + - name: SSV_API_PORT + value: "16043" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-43 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-43-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-43 + persistentVolumeClaim: + claimName: ssv-node-43 + - name: ssv-node-43-cm + configMap: + name: ssv-node-43-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-44-deployment.yml b/.k8/hetzner-stage/ssv-node-44-deployment.yml new file mode 100644 index 0000000000..d1bb327963 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-44-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-44-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-44 +spec: + type: ClusterIP + ports: + - port: 12044 + protocol: UDP + targetPort: 12044 + name: port-12044 + - port: 13044 + protocol: TCP + targetPort: 13044 + name: port-13044 + - port: 15044 + protocol: TCP + targetPort: 15044 + name: port-15044 + - port: 16044 + protocol: TCP + targetPort: 16044 + name: port-16044 + selector: + app: ssv-node-44 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-44 + name: ssv-node-44 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-44 + template: + metadata: + labels: + app: ssv-node-44 + spec: + containers: + - name: ssv-node-44 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12044 + name: port-12044 + protocol: UDP + hostPort: 12044 + - containerPort: 13044 + name: port-13044 + hostPort: 13044 + - containerPort: 15044 + name: port-15044 + hostPort: 15044 + - containerPort: 16044 + name: port-16044 + hostPort: 16044 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15044" + - name: SSV_API_PORT + value: "16044" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-44 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-44-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-44 + persistentVolumeClaim: + claimName: ssv-node-44 + - name: ssv-node-44-cm + configMap: + name: ssv-node-44-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-45-deployment.yml b/.k8/hetzner-stage/ssv-node-45-deployment.yml new file mode 100644 index 0000000000..159d8c92ba --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-45-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-45-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-45 +spec: + type: ClusterIP + ports: + - port: 12045 + protocol: UDP + targetPort: 12045 + name: port-12045 + - port: 13045 + protocol: TCP + targetPort: 13045 + name: port-13045 + - port: 15045 + protocol: TCP + targetPort: 15045 + name: port-15045 + - port: 16045 + protocol: TCP + targetPort: 16045 + name: port-16045 + selector: + app: ssv-node-45 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-45 + name: ssv-node-45 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-45 + template: + metadata: + labels: + app: ssv-node-45 + spec: + containers: + - name: ssv-node-45 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12045 + name: port-12045 + protocol: UDP + hostPort: 12045 + - containerPort: 13045 + name: port-13045 + hostPort: 13045 + - containerPort: 15045 + name: port-15045 + hostPort: 15045 + - containerPort: 16045 + name: port-16045 + hostPort: 16045 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15045" + - name: SSV_API_PORT + value: "16045" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-45 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-45-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-45 + persistentVolumeClaim: + claimName: ssv-node-45 + - name: ssv-node-45-cm + configMap: + name: ssv-node-45-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-46-deployment.yml b/.k8/hetzner-stage/ssv-node-46-deployment.yml new file mode 100644 index 0000000000..269646ca17 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-46-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-46-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-46 +spec: + type: ClusterIP + ports: + - port: 12046 + protocol: UDP + targetPort: 12046 + name: port-12046 + - port: 13046 + protocol: TCP + targetPort: 13046 + name: port-13046 + - port: 15046 + protocol: TCP + targetPort: 15046 + name: port-15046 + - port: 16046 + protocol: TCP + targetPort: 16046 + name: port-16046 + selector: + app: ssv-node-46 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-46 + name: ssv-node-46 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-46 + template: + metadata: + labels: + app: ssv-node-46 + spec: + containers: + - name: ssv-node-46 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12046 + name: port-12046 + protocol: UDP + hostPort: 12046 + - containerPort: 13046 + name: port-13046 + hostPort: 13046 + - containerPort: 15046 + name: port-15046 + hostPort: 15046 + - containerPort: 16046 + name: port-16046 + hostPort: 16046 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15046" + - name: SSV_API_PORT + value: "16046" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-46 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-46-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-46 + persistentVolumeClaim: + claimName: ssv-node-46 + - name: ssv-node-46-cm + configMap: + name: ssv-node-46-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-47-deployment.yml b/.k8/hetzner-stage/ssv-node-47-deployment.yml new file mode 100644 index 0000000000..90b2018e04 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-47-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-47-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-47 +spec: + type: ClusterIP + ports: + - port: 12047 + protocol: UDP + targetPort: 12047 + name: port-12047 + - port: 13047 + protocol: TCP + targetPort: 13047 + name: port-13047 + - port: 15047 + protocol: TCP + targetPort: 15047 + name: port-15047 + - port: 16047 + protocol: TCP + targetPort: 16047 + name: port-16047 + selector: + app: ssv-node-47 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-47 + name: ssv-node-47 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-47 + template: + metadata: + labels: + app: ssv-node-47 + spec: + containers: + - name: ssv-node-47 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12047 + name: port-12047 + protocol: UDP + hostPort: 12047 + - containerPort: 13047 + name: port-13047 + hostPort: 13047 + - containerPort: 15047 + name: port-15047 + hostPort: 15047 + - containerPort: 16047 + name: port-16047 + hostPort: 16047 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15047" + - name: SSV_API_PORT + value: "16047" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-47 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-47-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-47 + persistentVolumeClaim: + claimName: ssv-node-47 + - name: ssv-node-47-cm + configMap: + name: ssv-node-47-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-48-deployment.yml b/.k8/hetzner-stage/ssv-node-48-deployment.yml new file mode 100644 index 0000000000..227ae0f11a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-48-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-48-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-48 +spec: + type: ClusterIP + ports: + - port: 12048 + protocol: UDP + targetPort: 12048 + name: port-12048 + - port: 13048 + protocol: TCP + targetPort: 13048 + name: port-13048 + - port: 15048 + protocol: TCP + targetPort: 15048 + name: port-15048 + - port: 16048 + protocol: TCP + targetPort: 16048 + name: port-16048 + selector: + app: ssv-node-48 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-48 + name: ssv-node-48 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-48 + template: + metadata: + labels: + app: ssv-node-48 + spec: + containers: + - name: ssv-node-48 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12048 + name: port-12048 + protocol: UDP + hostPort: 12048 + - containerPort: 13048 + name: port-13048 + hostPort: 13048 + - containerPort: 15048 + name: port-15048 + hostPort: 15048 + - containerPort: 16048 + name: port-16048 + hostPort: 16048 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15048" + - name: SSV_API_PORT + value: "16048" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-48 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-48-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-48 + persistentVolumeClaim: + claimName: ssv-node-48 + - name: ssv-node-48-cm + configMap: + name: ssv-node-48-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-49-deployment.yml b/.k8/hetzner-stage/ssv-node-49-deployment.yml new file mode 100644 index 0000000000..2ecc568451 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-49-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-49-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-49 +spec: + type: ClusterIP + ports: + - port: 12049 + protocol: UDP + targetPort: 12049 + name: port-12049 + - port: 13049 + protocol: TCP + targetPort: 13049 + name: port-13049 + - port: 15049 + protocol: TCP + targetPort: 15049 + name: port-15049 + - port: 16049 + protocol: TCP + targetPort: 16049 + name: port-16049 + selector: + app: ssv-node-49 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-49 + name: ssv-node-49 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-49 + template: + metadata: + labels: + app: ssv-node-49 + spec: + containers: + - name: ssv-node-49 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12049 + name: port-12049 + protocol: UDP + hostPort: 12049 + - containerPort: 13049 + name: port-13049 + hostPort: 13049 + - containerPort: 15049 + name: port-15049 + hostPort: 15049 + - containerPort: 16049 + name: port-16049 + hostPort: 16049 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15049" + - name: SSV_API_PORT + value: "16049" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-49 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-49-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-49 + persistentVolumeClaim: + claimName: ssv-node-49 + - name: ssv-node-49-cm + configMap: + name: ssv-node-49-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-5-deployment.yml b/.k8/hetzner-stage/ssv-node-5-deployment.yml new file mode 100644 index 0000000000..041f35a3dc --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-5-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-5-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-5 +spec: + type: ClusterIP + ports: + - port: 12005 + protocol: UDP + targetPort: 12005 + name: port-12005 + - port: 13005 + protocol: TCP + targetPort: 13005 + name: port-13005 + - port: 15005 + protocol: TCP + targetPort: 15005 + name: port-15005 + - port: 16005 + protocol: TCP + targetPort: 16005 + name: port-16005 + selector: + app: ssv-node-5 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-5 + name: ssv-node-5 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-5 + template: + metadata: + labels: + app: ssv-node-5 + spec: + containers: + - name: ssv-node-5 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12005 + name: port-12005 + protocol: UDP + hostPort: 12005 + - containerPort: 13005 + name: port-13005 + hostPort: 13005 + - containerPort: 15005 + name: port-15005 + hostPort: 15005 + - containerPort: 16005 + name: port-16005 + hostPort: 16005 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15005" + - name: SSV_API_PORT + value: "16005" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-5 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-5-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-5 + persistentVolumeClaim: + claimName: ssv-node-5 + - name: ssv-node-5-cm + configMap: + name: ssv-node-5-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-50-deployment.yml b/.k8/hetzner-stage/ssv-node-50-deployment.yml new file mode 100644 index 0000000000..5078410bbc --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-50-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-50-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-50 +spec: + type: ClusterIP + ports: + - port: 12050 + protocol: UDP + targetPort: 12050 + name: port-12050 + - port: 13050 + protocol: TCP + targetPort: 13050 + name: port-13050 + - port: 15050 + protocol: TCP + targetPort: 15050 + name: port-15050 + - port: 16050 + protocol: TCP + targetPort: 16050 + name: port-16050 + selector: + app: ssv-node-50 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-50 + name: ssv-node-50 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-50 + template: + metadata: + labels: + app: ssv-node-50 + spec: + containers: + - name: ssv-node-50 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12050 + name: port-12050 + protocol: UDP + hostPort: 12050 + - containerPort: 13050 + name: port-13050 + hostPort: 13050 + - containerPort: 15050 + name: port-15050 + hostPort: 15050 + - containerPort: 16050 + name: port-16050 + hostPort: 16050 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15050" + - name: SSV_API_PORT + value: "16050" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-50 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-50-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-50 + persistentVolumeClaim: + claimName: ssv-node-50 + - name: ssv-node-50-cm + configMap: + name: ssv-node-50-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-51-deployment.yml b/.k8/hetzner-stage/ssv-node-51-deployment.yml new file mode 100644 index 0000000000..48664649e0 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-51-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-51-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-51 +spec: + type: ClusterIP + ports: + - port: 12051 + protocol: UDP + targetPort: 12051 + name: port-12051 + - port: 13051 + protocol: TCP + targetPort: 13051 + name: port-13051 + - port: 15051 + protocol: TCP + targetPort: 15051 + name: port-15051 + - port: 16051 + protocol: TCP + targetPort: 16051 + name: port-16051 + selector: + app: ssv-node-51 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-51 + name: ssv-node-51 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-51 + template: + metadata: + labels: + app: ssv-node-51 + spec: + containers: + - name: ssv-node-51 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12051 + name: port-12051 + protocol: UDP + hostPort: 12051 + - containerPort: 13051 + name: port-13051 + hostPort: 13051 + - containerPort: 15051 + name: port-15051 + hostPort: 15051 + - containerPort: 16051 + name: port-16051 + hostPort: 16051 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15051" + - name: SSV_API_PORT + value: "16051" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-51 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-51-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-51 + persistentVolumeClaim: + claimName: ssv-node-51 + - name: ssv-node-51-cm + configMap: + name: ssv-node-51-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-52-deployment.yml b/.k8/hetzner-stage/ssv-node-52-deployment.yml new file mode 100644 index 0000000000..860a299915 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-52-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-52-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-52 +spec: + type: ClusterIP + ports: + - port: 12052 + protocol: UDP + targetPort: 12052 + name: port-12052 + - port: 13052 + protocol: TCP + targetPort: 13052 + name: port-13052 + - port: 15052 + protocol: TCP + targetPort: 15052 + name: port-15052 + - port: 16052 + protocol: TCP + targetPort: 16052 + name: port-16052 + selector: + app: ssv-node-52 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-52 + name: ssv-node-52 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-52 + template: + metadata: + labels: + app: ssv-node-52 + spec: + containers: + - name: ssv-node-52 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12052 + name: port-12052 + protocol: UDP + hostPort: 12052 + - containerPort: 13052 + name: port-13052 + hostPort: 13052 + - containerPort: 15052 + name: port-15052 + hostPort: 15052 + - containerPort: 16052 + name: port-16052 + hostPort: 16052 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15052" + - name: SSV_API_PORT + value: "16052" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-52 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-52-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-52 + persistentVolumeClaim: + claimName: ssv-node-52 + - name: ssv-node-52-cm + configMap: + name: ssv-node-52-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-53-deployment.yml b/.k8/hetzner-stage/ssv-node-53-deployment.yml new file mode 100644 index 0000000000..5f7e0a001e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-53-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-53-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-53 +spec: + type: ClusterIP + ports: + - port: 12053 + protocol: UDP + targetPort: 12053 + name: port-12053 + - port: 13053 + protocol: TCP + targetPort: 13053 + name: port-13053 + - port: 15053 + protocol: TCP + targetPort: 15053 + name: port-15053 + - port: 16053 + protocol: TCP + targetPort: 16053 + name: port-16053 + selector: + app: ssv-node-53 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-53 + name: ssv-node-53 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-53 + template: + metadata: + labels: + app: ssv-node-53 + spec: + containers: + - name: ssv-node-53 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12053 + name: port-12053 + protocol: UDP + hostPort: 12053 + - containerPort: 13053 + name: port-13053 + hostPort: 13053 + - containerPort: 15053 + name: port-15053 + hostPort: 15053 + - containerPort: 16053 + name: port-16053 + hostPort: 16053 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15053" + - name: SSV_API_PORT + value: "16053" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-53 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-53-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-53 + persistentVolumeClaim: + claimName: ssv-node-53 + - name: ssv-node-53-cm + configMap: + name: ssv-node-53-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-54-deployment.yml b/.k8/hetzner-stage/ssv-node-54-deployment.yml new file mode 100644 index 0000000000..fc4e56e47d --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-54-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-54-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-54 +spec: + type: ClusterIP + ports: + - port: 12054 + protocol: UDP + targetPort: 12054 + name: port-12054 + - port: 13054 + protocol: TCP + targetPort: 13054 + name: port-13054 + - port: 15054 + protocol: TCP + targetPort: 15054 + name: port-15054 + - port: 16054 + protocol: TCP + targetPort: 16054 + name: port-16054 + selector: + app: ssv-node-54 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-54 + name: ssv-node-54 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-54 + template: + metadata: + labels: + app: ssv-node-54 + spec: + containers: + - name: ssv-node-54 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12054 + name: port-12054 + protocol: UDP + hostPort: 12054 + - containerPort: 13054 + name: port-13054 + hostPort: 13054 + - containerPort: 15054 + name: port-15054 + hostPort: 15054 + - containerPort: 16054 + name: port-16054 + hostPort: 16054 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15054" + - name: SSV_API_PORT + value: "16054" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-54 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-54-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-54 + persistentVolumeClaim: + claimName: ssv-node-54 + - name: ssv-node-54-cm + configMap: + name: ssv-node-54-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-55-deployment.yml b/.k8/hetzner-stage/ssv-node-55-deployment.yml new file mode 100644 index 0000000000..ab5df31101 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-55-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-55-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-55 +spec: + type: ClusterIP + ports: + - port: 12055 + protocol: UDP + targetPort: 12055 + name: port-12055 + - port: 13055 + protocol: TCP + targetPort: 13055 + name: port-13055 + - port: 15055 + protocol: TCP + targetPort: 15055 + name: port-15055 + - port: 16055 + protocol: TCP + targetPort: 16055 + name: port-16055 + selector: + app: ssv-node-55 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-55 + name: ssv-node-55 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-55 + template: + metadata: + labels: + app: ssv-node-55 + spec: + containers: + - name: ssv-node-55 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12055 + name: port-12055 + protocol: UDP + hostPort: 12055 + - containerPort: 13055 + name: port-13055 + hostPort: 13055 + - containerPort: 15055 + name: port-15055 + hostPort: 15055 + - containerPort: 16055 + name: port-16055 + hostPort: 16055 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15055" + - name: SSV_API_PORT + value: "16055" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-55 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-55-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-55 + persistentVolumeClaim: + claimName: ssv-node-55 + - name: ssv-node-55-cm + configMap: + name: ssv-node-55-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-56-deployment.yml b/.k8/hetzner-stage/ssv-node-56-deployment.yml new file mode 100644 index 0000000000..054a1d46e1 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-56-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-56-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-56 +spec: + type: ClusterIP + ports: + - port: 12056 + protocol: UDP + targetPort: 12056 + name: port-12056 + - port: 13056 + protocol: TCP + targetPort: 13056 + name: port-13056 + - port: 15056 + protocol: TCP + targetPort: 15056 + name: port-15056 + - port: 16056 + protocol: TCP + targetPort: 16056 + name: port-16056 + selector: + app: ssv-node-56 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-56 + name: ssv-node-56 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-56 + template: + metadata: + labels: + app: ssv-node-56 + spec: + containers: + - name: ssv-node-56 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12056 + name: port-12056 + protocol: UDP + hostPort: 12056 + - containerPort: 13056 + name: port-13056 + hostPort: 13056 + - containerPort: 15056 + name: port-15056 + hostPort: 15056 + - containerPort: 16056 + name: port-16056 + hostPort: 16056 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15056" + - name: SSV_API_PORT + value: "16056" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-56 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-56-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-56 + persistentVolumeClaim: + claimName: ssv-node-56 + - name: ssv-node-56-cm + configMap: + name: ssv-node-56-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-57-deployment.yml b/.k8/hetzner-stage/ssv-node-57-deployment.yml new file mode 100644 index 0000000000..ea75a6043c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-57-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-57-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-57 +spec: + type: ClusterIP + ports: + - port: 12057 + protocol: UDP + targetPort: 12057 + name: port-12057 + - port: 13057 + protocol: TCP + targetPort: 13057 + name: port-13057 + - port: 15057 + protocol: TCP + targetPort: 15057 + name: port-15057 + - port: 16057 + protocol: TCP + targetPort: 16057 + name: port-16057 + selector: + app: ssv-node-57 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-57 + name: ssv-node-57 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-57 + template: + metadata: + labels: + app: ssv-node-57 + spec: + containers: + - name: ssv-node-57 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12057 + name: port-12057 + protocol: UDP + hostPort: 12057 + - containerPort: 13057 + name: port-13057 + hostPort: 13057 + - containerPort: 15057 + name: port-15057 + hostPort: 15057 + - containerPort: 16057 + name: port-16057 + hostPort: 16057 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15057" + - name: SSV_API_PORT + value: "16057" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-57 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-57-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-57 + persistentVolumeClaim: + claimName: ssv-node-57 + - name: ssv-node-57-cm + configMap: + name: ssv-node-57-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml new file mode 100644 index 0000000000..049a3a3112 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-58-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-58 +spec: + type: ClusterIP + ports: + - port: 12058 + protocol: UDP + targetPort: 12058 + name: port-12058 + - port: 13058 + protocol: TCP + targetPort: 13058 + name: port-13058 + - port: 15858 + protocol: TCP + targetPort: 15858 + name: port-15858 + - port: 16058 + protocol: TCP + targetPort: 16058 + name: port-16058 + selector: + app: ssv-node-58 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-58 + name: ssv-node-58 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-58 + template: + metadata: + labels: + app: ssv-node-58 + spec: + containers: + - name: ssv-node-58 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12058 + name: port-12058 + protocol: UDP + hostPort: 12058 + - containerPort: 13058 + name: port-13058 + hostPort: 13058 + - containerPort: 15858 + name: port-15858 + hostPort: 15858 + - containerPort: 16058 + name: port-16058 + hostPort: 16058 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY
+ value: "discv5"
+ - name: CONSENSUS_TYPE
+ value: "validation"
+ - name: HOST_DNS
+ value: ""
+ - name: HOST_ADDRESS
+ value: ""
+ - name: DB_PATH
+ value: "./data/db-jato-v2"
+ - name: NETWORK
+ value: "jato-v2-stage"
+ - name: DB_REPORTING
+ value: "false"
+ - name: METRICS_API_PORT
+ value: "15058"
+ - name: SSV_API_PORT
+ value: "16058"
+ - name: ENABLE_PROFILE
+ value: "true"
+ - name: DISCOVERY_TRACE
+ value: 'false'
+ - name: PUBSUB_TRACE
+ value: 'false'
+ - name: BUILDER_PROPOSALS
+ value: "true"
+ volumeMounts:
+ - mountPath: /data
+ name: ssv-node-58
+ - mountPath: /data/share.yaml
+ subPath: share.yaml
+ name: ssv-node-58-cm
+ imagePullSecrets:
+ - name: ecr-repo
+ volumes:
+ - name: ssv-node-58
+ persistentVolumeClaim:
+ claimName: ssv-node-58
+ - name: ssv-node-58-cm
+ configMap:
+ name: ssv-node-58-cm
+ hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-59-deployment.yml b/.k8/hetzner-stage/ssv-node-59-deployment.yml
new file mode 100644
index 0000000000..1519c403d2
--- /dev/null
+++ b/.k8/hetzner-stage/ssv-node-59-deployment.yml
@@ -0,0 +1,133 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: ssv-node-59-svc
+ namespace: REPLACE_NAMESPACE
+ labels:
+ app: ssv-node-59
+spec:
+ type: ClusterIP
+ ports:
+ - port: 12059
+ protocol: UDP
+ targetPort: 12059
+ name: port-12059
+ - port: 13059
+ protocol: TCP
+ targetPort: 13059
+ name: port-13059
+ - port: 15059
+ protocol: TCP
+ targetPort: 15059
+ name: port-15059
+ - port: 16059
+ protocol: TCP
+ targetPort: 16059
+ name: port-16059
+ selector:
+ app: ssv-node-59
+---
+apiVersion: REPLACE_API_VERSION
+kind: Deployment
+metadata:
+ labels:
+ app: ssv-node-59
+ name: ssv-node-59
+ namespace: REPLACE_NAMESPACE
+spec:
+ replicas: 1
+ strategy:
+ type: Recreate
+ selector:
+ matchLabels:
+ app: ssv-node-59
+ template:
+ metadata:
+ labels:
+ app: ssv-node-59
+ spec:
+ containers:
+ - name: ssv-node-59
+ image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+ #image: mosheblox/ssv-preview:stage
+ imagePullPolicy: Always
+ resources:
+ limits:
+ cpu: REPLACE_NODES_CPU_LIMIT
+ memory: REPLACE_NODES_MEM_LIMIT
+ command: ["make", "start-node"]
+ ports:
+ - containerPort: 12059
+ name: port-12059
+ protocol: UDP
+ hostPort: 12059
+ - containerPort: 13059
+ name: port-13059
+ hostPort: 13059
+ - containerPort: 15059
+ name: port-15059
+ hostPort: 15059
+ - containerPort: 16059
+ name: port-16059
+ hostPort: 16059
+ env:
+ - name: SHARE_CONFIG
+ value: "./data/share.yaml"
+ - name: CONFIG_PATH
+ valueFrom:
+ secretKeyRef:
+ name: config-secrets
+ key: config_path
+ - name: ABI_VERSION
+ valueFrom:
+ secretKeyRef:
+ name: config-secrets
+ key: abi_version
+ optional: true
+ - name: LOG_LEVEL
+ value: "debug"
+ - name: DEBUG_SERVICES
+ value: "ssv/*."
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15059" + - name: SSV_API_PORT + value: "16059" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-59 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-59-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-59 + persistentVolumeClaim: + claimName: ssv-node-59 + - name: ssv-node-59-cm + configMap: + name: ssv-node-59-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-6-deployment.yml b/.k8/hetzner-stage/ssv-node-6-deployment.yml new file mode 100644 index 0000000000..8f04d5b19b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-6-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-6-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-6 +spec: + type: ClusterIP + ports: + - port: 12006 + protocol: UDP + targetPort: 12006 + name: port-12006 + - port: 13006 + protocol: TCP + targetPort: 13006 + name: port-13006 + - port: 15006 + protocol: TCP + targetPort: 15006 + name: port-15006 + - port: 16006 + protocol: TCP + targetPort: 16006 + name: port-16006 + selector: + app: ssv-node-6 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-6 + name: ssv-node-6 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-6 + template: + metadata: + labels: + app: ssv-node-6 + spec: + containers: + - name: ssv-node-6 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12006 + name: port-12006 + protocol: UDP + hostPort: 12006 + - containerPort: 13006 + name: port-13006 + hostPort: 13006 + - containerPort: 15006 + name: port-15006 + hostPort: 15006 + - containerPort: 16006 + name: port-16006 + hostPort: 16006 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15006" + - name: SSV_API_PORT + value: "16006" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-6 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-6-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-6 + persistentVolumeClaim: + claimName: ssv-node-6 + - name: ssv-node-6-cm + configMap: + name: ssv-node-6-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-60-deployment.yml b/.k8/hetzner-stage/ssv-node-60-deployment.yml new file mode 100644 index 0000000000..a3362acda5 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-60-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-60-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-60 +spec: + type: ClusterIP + ports: + - port: 12060 + protocol: UDP + targetPort: 12060 + name: port-12060 + - port: 13060 + protocol: TCP + targetPort: 13060 + name: port-13060 + - port: 15060 + protocol: TCP + targetPort: 15060 + name: port-15060 + - port: 16060 + protocol: TCP + targetPort: 16060 + name: port-16060 + selector: + app: ssv-node-60 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-60 + name: ssv-node-60 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-60 + template: + metadata: + labels: + app: ssv-node-60 + spec: + containers: + - name: ssv-node-60 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12060 + name: port-12060 + protocol: UDP + hostPort: 12060 + - containerPort: 13060 + name: port-13060 + hostPort: 13060 + - containerPort: 15060 + name: port-15060 + hostPort: 15060 + - containerPort: 16060 + name: port-16060 + hostPort: 16060 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15060" + - name: SSV_API_PORT + value: "16060" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-60 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-60-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-60 + persistentVolumeClaim: + claimName: ssv-node-60 + - name: ssv-node-60-cm + configMap: + name: ssv-node-60-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-61-deployment.yml b/.k8/hetzner-stage/ssv-node-61-deployment.yml new file mode 100644 index 0000000000..c8ff819411 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-61-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-61-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-61 +spec: + type: ClusterIP + ports: + - port: 12061 + protocol: UDP + targetPort: 12061 + name: port-12061 + - port: 13061 + protocol: TCP + targetPort: 13061 + name: port-13061 + - port: 15061 + protocol: TCP + targetPort: 15061 + name: port-15061 + - port: 16061 + protocol: TCP + targetPort: 16061 + name: port-16061 + selector: + app: ssv-node-61 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-61 + name: ssv-node-61 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-61 + template: + metadata: + labels: + app: ssv-node-61 + spec: + containers: + - name: ssv-node-61 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12061 + name: port-12061 + protocol: UDP + hostPort: 12061 + - containerPort: 13061 + name: port-13061 + hostPort: 13061 + - containerPort: 15061 + name: port-15061 + hostPort: 15061 + - containerPort: 16061 + name: port-16061 + hostPort: 16061 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15061" + - name: SSV_API_PORT + value: "16061" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-61 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-61-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-61 + persistentVolumeClaim: + claimName: ssv-node-61 + - name: ssv-node-61-cm + configMap: + name: ssv-node-61-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-62-deployment.yml b/.k8/hetzner-stage/ssv-node-62-deployment.yml new file mode 100644 index 0000000000..6d8d4a6ac6 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-62-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-62-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-62 +spec: + type: ClusterIP + ports: + - port: 12062 + protocol: UDP + targetPort: 12062 + name: port-12062 + - port: 13062 + protocol: TCP + targetPort: 13062 + name: port-13062 + - port: 15062 + protocol: TCP + targetPort: 15062 + name: port-15062 + - port: 16062 + protocol: TCP + targetPort: 16062 + name: port-16062 + selector: + app: ssv-node-62 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-62 + name: ssv-node-62 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-62 + template: + metadata: + labels: + app: ssv-node-62 + spec: + containers: + - name: ssv-node-62 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12062 + name: port-12062 + protocol: UDP + hostPort: 12062 + - containerPort: 13062 + name: port-13062 + hostPort: 13062 + - containerPort: 15062 + name: port-15062 + hostPort: 15062 + - containerPort: 16062 + name: port-16062 + hostPort: 16062 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15062" + - name: SSV_API_PORT + value: "16062" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-62 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-62-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-62 + persistentVolumeClaim: + claimName: ssv-node-62 + - name: ssv-node-62-cm + configMap: + name: ssv-node-62-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-63-deployment.yml b/.k8/hetzner-stage/ssv-node-63-deployment.yml new file mode 100644 index 0000000000..a98de28b65 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-63-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-63-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-63 +spec: + type: ClusterIP + ports: + - port: 12063 + protocol: UDP + targetPort: 12063 + name: port-12063 + - port: 13063 + protocol: TCP + targetPort: 13063 + name: port-13063 + - port: 15063 + protocol: TCP + targetPort: 15063 + name: port-15063 + - port: 16063 + protocol: TCP + targetPort: 16063 + name: port-16063 + selector: + app: ssv-node-63 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-63 + name: ssv-node-63 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-63 + template: + metadata: + labels: + app: ssv-node-63 + spec: + containers: + - name: ssv-node-63 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12063 + name: port-12063 + protocol: UDP + hostPort: 12063 + - containerPort: 13063 + name: port-13063 + hostPort: 13063 + - containerPort: 15063 + name: port-15063 + hostPort: 15063 + - containerPort: 16063 + name: port-16063 + hostPort: 16063 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15063" + - name: SSV_API_PORT + value: "16063" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-63 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-63-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-63 + persistentVolumeClaim: + claimName: ssv-node-63 + - name: ssv-node-63-cm + configMap: + name: ssv-node-63-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-64-deployment.yml b/.k8/hetzner-stage/ssv-node-64-deployment.yml new file mode 100644 index 0000000000..9baafd765a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-64-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-64-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-64 +spec: + type: ClusterIP + ports: + - port: 12064 + protocol: UDP + targetPort: 12064 + name: port-12064 + - port: 13064 + protocol: TCP + targetPort: 13064 + name: port-13064 + - port: 15064 + protocol: TCP + targetPort: 15064 + name: port-15064 + - port: 16064 + protocol: TCP + targetPort: 16064 + name: port-16064 + selector: + app: ssv-node-64 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-64 + name: ssv-node-64 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-64 + template: + metadata: + labels: + app: ssv-node-64 + spec: + containers: + - name: ssv-node-64 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12064 + name: port-12064 + protocol: UDP + hostPort: 12064 + - containerPort: 13064 + name: port-13064 + hostPort: 13064 + - containerPort: 15064 + name: port-15064 + hostPort: 15064 + - containerPort: 16064 + name: port-16064 + hostPort: 16064 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15064" + - name: SSV_API_PORT + value: "16064" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-64 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-64-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-64 + persistentVolumeClaim: + claimName: ssv-node-64 + - name: ssv-node-64-cm + configMap: + name: ssv-node-64-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-65-deployment.yml b/.k8/hetzner-stage/ssv-node-65-deployment.yml new file mode 100644 index 0000000000..cd3cbfeed6 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-65-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-65-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-65 +spec: + type: ClusterIP + ports: + - port: 12065 + protocol: UDP + targetPort: 12065 + name: port-12065 + - port: 13065 + protocol: TCP + targetPort: 13065 + name: port-13065 + - port: 15065 + protocol: TCP + targetPort: 15065 + name: port-15065 + - port: 16065 + protocol: TCP + targetPort: 16065 + name: port-16065 + selector: + app: ssv-node-65 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-65 + name: ssv-node-65 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-65 + template: + metadata: + labels: + app: ssv-node-65 + spec: + containers: + - name: ssv-node-65 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12065 + name: port-12065 + protocol: UDP + hostPort: 12065 + - containerPort: 13065 + name: port-13065 + hostPort: 13065 + - containerPort: 15065 + name: port-15065 + hostPort: 15065 + - containerPort: 16065 + name: port-16065 + hostPort: 16065 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15065" + - name: SSV_API_PORT + value: "16065" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-65 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-65-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-65 + persistentVolumeClaim: + claimName: ssv-node-65 + - name: ssv-node-65-cm + configMap: + name: ssv-node-65-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-66-deployment.yml b/.k8/hetzner-stage/ssv-node-66-deployment.yml new file mode 100644 index 0000000000..b8888cb2ae --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-66-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-66-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-66 +spec: + type: ClusterIP + ports: + - port: 12066 + protocol: UDP + targetPort: 12066 + name: port-12066 + - port: 13066 + protocol: TCP + targetPort: 13066 + name: port-13066 + - port: 15066 + protocol: TCP + targetPort: 15066 + name: port-15066 + - port: 16066 + protocol: TCP + targetPort: 16066 + name: port-16066 + selector: + app: ssv-node-66 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-66 + name: ssv-node-66 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-66 + template: + metadata: + labels: + app: ssv-node-66 + spec: + containers: + - name: ssv-node-66 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12066 + name: port-12066 + protocol: UDP + hostPort: 12066 + - containerPort: 13066 + name: port-13066 + hostPort: 13066 + - containerPort: 15066 + name: port-15066 + hostPort: 15066 + - containerPort: 16066 + name: port-16066 + hostPort: 16066 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15066" + - name: SSV_API_PORT + value: "16066" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-66 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-66-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-66 + persistentVolumeClaim: + claimName: ssv-node-66 + - name: ssv-node-66-cm + configMap: + name: ssv-node-66-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-67-deployment.yml b/.k8/hetzner-stage/ssv-node-67-deployment.yml new file mode 100644 index 0000000000..0f5a78da4a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-67-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-67-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-67 +spec: + type: ClusterIP + ports: + - port: 12067 + protocol: UDP + targetPort: 12067 + name: port-12067 + - port: 13067 + protocol: TCP + targetPort: 13067 + name: port-13067 + - port: 15067 + protocol: TCP + targetPort: 15067 + name: port-15067 + - port: 16067 + protocol: TCP + targetPort: 16067 + name: port-16067 + selector: + app: ssv-node-67 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-67 + name: ssv-node-67 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-67 + template: + metadata: + labels: + app: ssv-node-67 + spec: + containers: + - name: ssv-node-67 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12067 + name: port-12067 + protocol: UDP + hostPort: 12067 + - containerPort: 13067 + name: port-13067 + hostPort: 13067 + - containerPort: 15067 + name: port-15067 + hostPort: 15067 + - containerPort: 16067 + name: port-16067 + hostPort: 16067 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15067" + - name: SSV_API_PORT + value: "16067" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-67 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-67-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-67 + persistentVolumeClaim: + claimName: ssv-node-67 + - name: ssv-node-67-cm + configMap: + name: ssv-node-67-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-68-deployment.yml b/.k8/hetzner-stage/ssv-node-68-deployment.yml new file mode 100644 index 0000000000..2a72b67d40 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-68-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-68-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-68 +spec: + type: ClusterIP + ports: + - port: 12068 + protocol: UDP + targetPort: 12068 + name: port-12068 + - port: 13068 + protocol: TCP + targetPort: 13068 + name: port-13068 + - port: 15068 + protocol: TCP + targetPort: 15068 + name: port-15068 + - port: 16068 + protocol: TCP + targetPort: 16068 + name: port-16068 + selector: + app: ssv-node-68 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-68 + name: ssv-node-68 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-68 + template: + metadata: + labels: + app: ssv-node-68 + spec: + containers: + - name: ssv-node-68 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12068 + name: port-12068 + protocol: UDP + hostPort: 12068 + - containerPort: 13068 + name: port-13068 + hostPort: 13068 + - containerPort: 15068 + name: port-15068 + hostPort: 15068 + - containerPort: 16068 + name: port-16068 + hostPort: 16068 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15068" + - name: SSV_API_PORT + value: "16068" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-68 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-68-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-68 + persistentVolumeClaim: + claimName: ssv-node-68 + - name: ssv-node-68-cm + configMap: + name: ssv-node-68-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-69-deployment.yml b/.k8/hetzner-stage/ssv-node-69-deployment.yml new file mode 100644 index 0000000000..2a3a6f05fd --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-69-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-69-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-69 +spec: + type: ClusterIP + ports: + - port: 12069 + protocol: UDP + targetPort: 12069 + name: port-12069 + - port: 13069 + protocol: TCP + targetPort: 13069 + name: port-13069 + - port: 15069 + protocol: TCP + targetPort: 15069 + name: port-15069 + - port: 16069 + protocol: TCP + targetPort: 16069 + name: port-16069 + selector: + app: ssv-node-69 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-69 + name: ssv-node-69 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-69 + template: + metadata: + labels: + app: ssv-node-69 + spec: + containers: + - name: ssv-node-69 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12069 + name: port-12069 + protocol: UDP + hostPort: 12069 + - containerPort: 13069 + name: port-13069 + hostPort: 13069 + - containerPort: 15069 + name: port-15069 + hostPort: 15069 + - containerPort: 16069 + name: port-16069 + hostPort: 16069 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15069" + - name: SSV_API_PORT + value: "16069" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-69 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-69-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-69 + persistentVolumeClaim: + claimName: ssv-node-69 + - name: ssv-node-69-cm + configMap: + name: ssv-node-69-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-7-deployment.yml b/.k8/hetzner-stage/ssv-node-7-deployment.yml new file mode 100644 index 0000000000..1e3d450f6e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-7-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-7-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-7 +spec: + type: ClusterIP + ports: + - port: 12007 + protocol: UDP + targetPort: 12007 + name: port-12007 + - port: 13007 + protocol: TCP + targetPort: 13007 + name: port-13007 + - port: 15007 + protocol: TCP + targetPort: 15007 + name: port-15007 + - port: 16007 + protocol: TCP + targetPort: 16007 + name: port-16007 + selector: + app: ssv-node-7 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-7 + name: ssv-node-7 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-7 + template: + metadata: + labels: + app: ssv-node-7 + spec: + containers: + - name: ssv-node-7 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12007 + name: port-12007 + protocol: UDP + hostPort: 12007 + - containerPort: 13007 + name: port-13007 + hostPort: 13007 + - containerPort: 15007 + name: port-15007 + hostPort: 15007 + - containerPort: 16007 + name: port-16007 + hostPort: 16007 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15007" + - name: SSV_API_PORT + value: "16007" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-7 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-7-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-7 + persistentVolumeClaim: + claimName: ssv-node-7 + - name: ssv-node-7-cm + configMap: + name: ssv-node-7-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-70-deployment.yml b/.k8/hetzner-stage/ssv-node-70-deployment.yml new file mode 100644 index 0000000000..8d8caa0823 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-70-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-70-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-70 +spec: + type: ClusterIP + ports: + - port: 12070 + protocol: UDP + targetPort: 12070 + name: port-12070 + - port: 13070 + protocol: TCP + targetPort: 13070 + name: port-13070 + - port: 15070 + protocol: TCP + targetPort: 15070 + name: port-15070 + - port: 16070 + protocol: TCP + targetPort: 16070 + name: port-16070 + selector: + app: ssv-node-70 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-70 + name: ssv-node-70 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-70 + template: + metadata: + labels: + app: ssv-node-70 + spec: + containers: + - name: ssv-node-70 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12070 + name: port-12070 + protocol: UDP + hostPort: 12070 + - containerPort: 13070 + name: port-13070 + hostPort: 13070 + - containerPort: 15070 + name: port-15070 + hostPort: 15070 + - containerPort: 16070 + name: port-16070 + hostPort: 16070 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15070" + - name: SSV_API_PORT + value: "16070" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-70 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-70-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-70 + persistentVolumeClaim: + claimName: ssv-node-70 + - name: ssv-node-70-cm + configMap: + name: ssv-node-70-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-71-deployment.yml b/.k8/hetzner-stage/ssv-node-71-deployment.yml new file mode 100644 index 0000000000..919749db9b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-71-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-71-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-71 +spec: + type: ClusterIP + ports: + - port: 12071 + protocol: UDP + targetPort: 12071 + name: port-12071 + - port: 13071 + protocol: TCP + targetPort: 13071 + name: port-13071 + - port: 15071 + protocol: TCP + targetPort: 15071 + name: port-15071 + - port: 16071 + protocol: TCP + targetPort: 16071 + name: port-16071 + selector: + app: ssv-node-71 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-71 + name: ssv-node-71 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-71 + template: + metadata: + labels: + app: ssv-node-71 + spec: + containers: + - name: ssv-node-71 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12071 + name: port-12071 + protocol: UDP + hostPort: 12071 + - containerPort: 13071 + name: port-13071 + hostPort: 13071 + - containerPort: 15071 + name: port-15071 + hostPort: 15071 + - containerPort: 16071 + name: port-16071 + hostPort: 16071 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15071" + - name: SSV_API_PORT + value: "16071" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-71 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-71-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-71 + persistentVolumeClaim: + claimName: ssv-node-71 + - name: ssv-node-71-cm + configMap: + name: ssv-node-71-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-72-deployment.yml b/.k8/hetzner-stage/ssv-node-72-deployment.yml new file mode 100644 index 0000000000..73dae0d45f --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-72-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-72-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-72 +spec: + type: ClusterIP + ports: + - port: 12072 + protocol: UDP + targetPort: 12072 + name: port-12072 + - port: 13072 + protocol: TCP + targetPort: 13072 + name: port-13072 + - port: 15072 + protocol: TCP + targetPort: 15072 + name: port-15072 + - port: 16072 + protocol: TCP + targetPort: 16072 + name: port-16072 + selector: + app: ssv-node-72 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-72 + name: ssv-node-72 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-72 + template: + metadata: + labels: + app: ssv-node-72 + spec: + containers: + - name: ssv-node-72 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12072 + name: port-12072 + protocol: UDP + hostPort: 12072 + - containerPort: 13072 + name: port-13072 + hostPort: 13072 + - containerPort: 15072 + name: port-15072 + hostPort: 15072 + - containerPort: 16072 + name: port-16072 + hostPort: 16072 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15072" + - name: SSV_API_PORT + value: "16072" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-72 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-72-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-72 + persistentVolumeClaim: + claimName: ssv-node-72 + - name: ssv-node-72-cm + configMap: + name: ssv-node-72-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-8-deployment.yml b/.k8/hetzner-stage/ssv-node-8-deployment.yml new file mode 100644 index 0000000000..37f3352ea8 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-8-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-8-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-8 +spec: + type: ClusterIP + ports: + - port: 12008 + protocol: UDP + targetPort: 12008 + name: port-12008 + - port: 13008 + protocol: TCP + targetPort: 13008 + name: port-13008 + - port: 15008 + protocol: TCP + targetPort: 15008 + name: port-15008 + - port: 16008 + protocol: TCP + targetPort: 16008 + name: port-16008 + selector: + app: ssv-node-8 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-8 + name: ssv-node-8 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-8 + template: + metadata: + labels: + app: ssv-node-8 + spec: + containers: + - name: ssv-node-8 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12008 + name: port-12008 + protocol: UDP + hostPort: 12008 + - containerPort: 13008 + name: port-13008 + hostPort: 13008 + - containerPort: 15008 + name: port-15008 + hostPort: 15008 + - containerPort: 16008 + name: port-16008 + hostPort: 16008 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15008" + - name: SSV_API_PORT + value: "16008" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-8 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-8-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-8 + persistentVolumeClaim: + claimName: ssv-node-8 + - name: ssv-node-8-cm + configMap: + name: ssv-node-8-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-9-deployment.yml b/.k8/hetzner-stage/ssv-node-9-deployment.yml new file mode 100644 index 0000000000..3904816442 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-9-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-9-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-9 +spec: + type: ClusterIP + ports: + - port: 12009 + protocol: UDP + targetPort: 12009 + name: port-12009 + - port: 13009 + protocol: TCP + targetPort: 13009 + name: port-13009 + - port: 15009 + protocol: TCP + targetPort: 15009 + name: port-15009 + - port: 16009 + protocol: TCP + targetPort: 16009 + name: port-16009 + selector: + app: ssv-node-9 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-9 + name: ssv-node-9 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-9 + template: + metadata: + labels: + app: ssv-node-9 + spec: + containers: + - name: ssv-node-9 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12009 + name: port-12009 + protocol: UDP + hostPort: 12009 + - containerPort: 13009 + name: port-13009 + hostPort: 13009 + - containerPort: 15009 + name: port-15009 + hostPort: 15009 + - containerPort: 16009 + name: port-16009 + hostPort: 16009 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-jato-v2" + - name: NETWORK + value: "jato-v2-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15009" + - name: SSV_API_PORT + value: "16009" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-9 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-9-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-9 + persistentVolumeClaim: + claimName: ssv-node-9 + - name: ssv-node-9-cm + configMap: + name: ssv-node-9-cm + hostNetwork: true From 6ad684cb8cb081649670493381050fe3943c32fa Mon Sep 17 00:00:00 2001 From: systemblox <40427708+systemblox@users.noreply.github.com> Date: Tue, 24 Oct 2023 15:00:00 +0300 Subject: [PATCH 19/54] Updated Hetzner ci/cd (#1169) * Updated Hetzner ci/cd --------- Co-authored-by: stoyan.peev --- .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-25--28.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh | 0 .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh | 0 18 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-25--28.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh mode change 100644 => 100755 
.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh mode change 100644 => 100755 .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh b/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh old mode 100644 new mode 100755 diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh old mode 100644 new mode 100755 From a2fde382735cf196ec6f576ce8a324cad84176ba Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Tue, 24 Oct 2023 15:05:22 +0300 Subject: [PATCH 20/54] holesky exporter deployment files (#1170) * holesky exporter deployment files * migrate holesky exporetr to hetzner --- .gitlab-ci.yml | 16 ++ .k8/stage/scripts/deploy-holesky-exporters.sh | 104 ++++++++++ .k8/stage/ssv-exporter-holesky.yml | 187 ++++++++++++++++++ 3 files changed, 307 insertions(+) create mode 100644 .k8/stage/scripts/deploy-holesky-exporters.sh create mode 100644 .k8/stage/ssv-exporter-holesky.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f0819461bc..6ced818249 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -131,6 +131,22 @@ Deploy nodes to hetzner stage: 
only: - stage +Deploy exporter to hetzner stage: + stage: deploy + tags: + - hetzner-k8s-stage + script: + - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION + - export SSV_NODES_CPU_LIMIT=$HETZNER_STAGE_SSV_NODES_CPU_LIMIT + - export SSV_NODES_MEM_LIMIT=$HETZNER_STAGE_SSV_NODES_MEM_LIMIT + - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig + - mv kubeconfig ~/.kube/ + - export KUBECONFIG=~/.kube/kubeconfig + - kubectl config get-contexts + - .k8/stage/scripts/deploy-holesky-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT + only: + - stage + # +---------------+ # | Prod | # +---------------+ diff --git a/.k8/stage/scripts/deploy-holesky-exporters.sh b/.k8/stage/scripts/deploy-holesky-exporters.sh new file mode 100644 index 0000000000..c384e72012 --- /dev/null +++ b/.k8/stage/scripts/deploy-holesky-exporters.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z ${9} ]]; then + echo "Please provide exporter cpu limit" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide exporter mem limit" + exit 1 +fi + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +EXPORTER_CPU_LIMIT=$9 +EXPORTER_MEM_LIMIT=${10} + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $EXPORTER_CPU_LIMIT +echo $EXPORTER_MEM_LIMIT + +# create namespace if not exists +if !
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +DIR=".k8/stage" +DEPLOY_FILES=( + "ssv-exporter-holesky.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ + -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done \ No newline at end of file diff --git a/.k8/stage/ssv-exporter-holesky.yml b/.k8/stage/ssv-exporter-holesky.yml new file mode 100644 index 0000000000..a665b13ae5 --- /dev/null +++ b/.k8/stage/ssv-exporter-holesky.yml @@ -0,0 +1,187 @@ +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: ssv-exporter-holesky + namespace: REPLACE_NAMESPACE +spec: + hosts: + - "ws-exporter-holesky.REPLACE_DOMAIN_SUFFIX" + gateways: + - ssv-exporter-holesky + http: + - route: + - destination: + host: ssv-exporter-holesky + port: + number: 14013 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: ssv-exporter-holesky + namespace: REPLACE_NAMESPACE +spec: + selector: + istio: ingressgateway-int + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "ws-exporter-holesky.REPLACE_DOMAIN_SUFFIX" +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-exporter-holesky + namespace: REPLACE_NAMESPACE + labels: + app: ssv-exporter-holesky +spec: + type: ClusterIP + ports: + - port: 12013 + protocol: UDP + targetPort: 12013 + name: port-12013 + - port: 13013 + protocol: TCP + targetPort: 13013 + name: port-13013 + - port: 14013 + protocol: TCP + targetPort: 14013 + name: port-14013 + - port: 15013 + protocol: TCP + targetPort: 15013 + name: port-15013 + - port: 16013 + protocol: TCP + targetPort: 16013 + name: port-16013 + selector: + app: ssv-exporter-holesky +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-exporter-holesky + name: ssv-exporter-holesky + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-exporter-holesky + template: + metadata: + labels: + app: ssv-exporter-holesky + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + containers: + - name: ssv-exporter-holesky + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_EXPORTER_CPU_LIMIT + memory: REPLACE_EXPORTER_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12013 + name: port-12013 + hostPort: 12013 + protocol: UDP + - containerPort: 13013 + name: port-13013 + hostPort: 13013 + - containerPort: 14013 + name: port-14013 + hostPort: 14013 + - containerPort: 15013 + name: port-15013 + hostPort: 15013 + - containerPort: 16013 + name: port-16013 + hostPort: 16013 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: 
config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv.*" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15013" + - name: SSV_API_PORT + value: "16013" + - name: ENABLE_PROFILE + value: "true" + - name: UDP_PORT + value: "12013" + - name: TCP_PORT + value: "13003" + - name: WS_API_PORT + value: "14013" + - name: FULLNODE + value: "true" + - name: EXPORTER + value: "true" + - name: DISCOVERY_TRACE + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: SUBNETS + value: "0xffffffffffffffffffffffffffffffff" + volumeMounts: + - mountPath: /data + name: ssv-exporter-holesky + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-exporter-holesky-cm + volumes: + - name: ssv-exporter-holesky + persistentVolumeClaim: + claimName: ssv-exporter-holesky + - name: ssv-exporter-holesky-cm + configMap: + name: ssv-exporter-holesky-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true From c25ed31cd797ee0465c40a532ca6d2854984071f Mon Sep 17 00:00:00 2001 From: systemblox <40427708+systemblox@users.noreply.github.com> Date: Tue, 24 Oct 2023 15:49:00 +0300 Subject: [PATCH 21/54] Stage fix ci cd (#1172) * Updated gitlab-ci * Updated permissions * Updated env * Updated ci/cd * Updated gitlab-ci * Updated gitlab-ci * Updated holesky exporter * Updated holesky exporter --------- Co-authored-by: stoyan.peev --- .gitlab-ci.yml | 7 +- .../scripts/deploy-holesky-exporters.sh | 104 +++++++++++++ .k8/hetzner-stage/ssv-exporter-holesky.yml | 143 ++++++++++++++++++ .k8/stage/scripts/deploy-holesky-exporters.sh | 0 4 files changed, 251 insertions(+), 3 deletions(-) create mode 100755 .k8/hetzner-stage/scripts/deploy-holesky-exporters.sh create mode 100644 .k8/hetzner-stage/ssv-exporter-holesky.yml mode change 100644 => 100755 .k8/stage/scripts/deploy-holesky-exporters.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6ced818249..5b1f7c4a50 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -133,17 +133,18 @@ Deploy nodes to hetzner stage: Deploy exporter to hetzner stage: stage: deploy + image: bitnami/kubectl:1.27.5 tags: - hetzner-k8s-stage script: - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION - - export SSV_NODES_CPU_LIMIT=$HETZNER_STAGE_SSV_NODES_CPU_LIMIT - - export SSV_NODES_MEM_LIMIT=$HETZNER_STAGE_SSV_NODES_MEM_LIMIT + - export SSV_EXPORTER_CPU_LIMIT=$STAGE_SSV_EXPORTER_CPU_LIMIT + - export SSV_EXPORTER_MEM_LIMIT=$STAGE_SSV_EXPORTER_MEM_LIMIT - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig - mv kubeconfig ~/.kube/ - export KUBECONFIG=~/.kube/kubeconfig - kubectl config get-contexts - - .k8/stage/scripts/deploy-holesky-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-holesky-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT only: - stage diff --git a/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh 
b/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh new file mode 100755 index 0000000000..9a899ef3d3 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z ${9} ]]; then + echo "Please provide exporter cpu limit" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide exporter mem limit" + exit 1 +fi + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +EXPORTER_CPU_LIMIT=$9 +EXPORTER_MEM_LIMIT=${10} + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $EXPORTER_CPU_LIMIT +echo $EXPORTER_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-exporter-holesky.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ + -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/ssv-exporter-holesky.yml b/.k8/hetzner-stage/ssv-exporter-holesky.yml new file mode 100644 index 0000000000..ae5af5299c --- /dev/null +++ b/.k8/hetzner-stage/ssv-exporter-holesky.yml @@ -0,0 +1,143 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-exporter-holesky + namespace: REPLACE_NAMESPACE + labels: + app: ssv-exporter-holesky +spec: + type: ClusterIP + ports: + - port: 12013 + protocol: UDP + targetPort: 12013 + name: port-12013 + - port: 13013 + protocol: TCP + targetPort: 13013 + name: port-13013 + - port: 14013 + protocol: TCP + targetPort: 14013 + name: port-14013 + - port: 15013 + protocol: TCP + targetPort: 15013 + name: port-15013 + - port: 16013 + protocol: TCP + targetPort: 16013 + name: port-16013 + selector: + app: ssv-exporter-holesky +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-exporter-holesky + name: ssv-exporter-holesky + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-exporter-holesky + template: + metadata: + labels: + app: ssv-exporter-holesky + spec: + containers: + - name: ssv-exporter-holesky + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+ imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_EXPORTER_CPU_LIMIT + memory: REPLACE_EXPORTER_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12013 + name: port-12013 + hostPort: 12013 + protocol: UDP + - containerPort: 13013 + name: port-13013 + hostPort: 13013 + - containerPort: 14013 + name: port-14013 + hostPort: 14013 + - containerPort: 15013 + name: port-15013 + hostPort: 15013 + - containerPort: 16013 + name: port-16013 + hostPort: 16013 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv.*" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15013" + - name: SSV_API_PORT + value: "16013" + - name: ENABLE_PROFILE + value: "true" + - name: UDP_PORT + value: "12013" + - name: TCP_PORT + value: "13003" + - name: WS_API_PORT + value: "14013" + - name: FULLNODE + value: "true" + - name: EXPORTER + value: "true" + - name: DISCOVERY_TRACE + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: SUBNETS + value: "0xffffffffffffffffffffffffffffffff" + volumeMounts: + - mountPath: /data + name: ssv-exporter-holesky + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-exporter-holesky-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-exporter-holesky + persistentVolumeClaim: + claimName: ssv-exporter-holesky + - name: ssv-exporter-holesky-cm + configMap: + name: ssv-exporter-holesky-cm + hostNetwork: true diff --git a/.k8/stage/scripts/deploy-holesky-exporters.sh b/.k8/stage/scripts/deploy-holesky-exporters.sh old mode 100644 new mode 100755 From 3cdfd266968f8f9ad1bc6d6bfa342f696de3b783 Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Wed, 25 Oct 2023 12:31:10 +0200 Subject: [PATCH 22/54] Holesky stage migration (#1173) --------- Co-authored-by: stoyan.peev Co-authored-by: systemblox <40427708+systemblox@users.noreply.github.com> --- .gitlab-ci.yml | 2 +- .k8/hetzner-stage/ssv-exporter-holesky.yml | 9 + .k8/hetzner-stage/ssv-node-1-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-10-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-11-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-12-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-13-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-14-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-15-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-16-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-17-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-18-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-19-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-2-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-20-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-21-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-22-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-23-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-24-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-25-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-26-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-27-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-28-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-29-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-3-deployment.yml 
| 4 +- .k8/hetzner-stage/ssv-node-30-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-31-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-32-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-33-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-34-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-35-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-36-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-37-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-38-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-39-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-4-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-40-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-41-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-42-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-43-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-44-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-45-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-46-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-47-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-48-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-49-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-5-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-50-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-51-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-52-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-53-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-54-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-55-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-56-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-57-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-58-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-59-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-6-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-60-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-61-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-62-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-63-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-64-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-65-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-66-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-67-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-68-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-69-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-7-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-70-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-71-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-72-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-8-deployment.yml | 4 +- .k8/hetzner-stage/ssv-node-9-deployment.yml | 4 +- .k8/stage/scripts/deploy-cluster-13--16.sh | 131 ------------ .k8/stage/scripts/deploy-cluster-9--12.sh | 131 ------------ .k8/stage/scripts/deploy-holesky-exporters.sh | 4 +- .k8/stage/ssv-exporter-holesky.yml | 187 ------------------ .k8/stage/ssv-node-10-deployment.yml | 161 --------------- .k8/stage/ssv-node-11-deployment.yml | 161 --------------- .k8/stage/ssv-node-12-deployment.yml | 161 --------------- .k8/stage/ssv-node-9-deployment.yml | 161 --------------- .k8/stage/ssv-node-v2-5-deployment.yml | 4 +- .k8/stage/ssv-node-v2-6-deployment.yml | 4 +- .k8/stage/ssv-node-v2-7-deployment.yml | 4 +- .k8/stage/ssv-node-v2-8-deployment.yml | 4 +- .k8/stage/ssv-node-v3-1-deployment.yml | 149 -------------- .k8/stage/ssv-node-v3-2-deployment.yml | 141 ------------- .k8/stage/ssv-node-v3-3-deployment.yml | 149 -------------- .k8/stage/ssv-node-v3-4-deployment.yml | 141 ------------- 90 files changed, 164 insertions(+), 1828 deletions(-) delete mode 100755 
.k8/stage/scripts/deploy-cluster-13--16.sh delete mode 100755 .k8/stage/scripts/deploy-cluster-9--12.sh delete mode 100644 .k8/stage/ssv-exporter-holesky.yml delete mode 100644 .k8/stage/ssv-node-10-deployment.yml delete mode 100644 .k8/stage/ssv-node-11-deployment.yml delete mode 100644 .k8/stage/ssv-node-12-deployment.yml delete mode 100644 .k8/stage/ssv-node-9-deployment.yml delete mode 100644 .k8/stage/ssv-node-v3-1-deployment.yml delete mode 100644 .k8/stage/ssv-node-v3-2-deployment.yml delete mode 100644 .k8/stage/ssv-node-v3-3-deployment.yml delete mode 100644 .k8/stage/ssv-node-v3-4-deployment.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5b1f7c4a50..f296289e6e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -133,9 +133,9 @@ Deploy nodes to hetzner stage: Deploy exporter to hetzner stage: stage: deploy - image: bitnami/kubectl:1.27.5 tags: - hetzner-k8s-stage + image: bitnami/kubectl:1.27.5 script: - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION - export SSV_EXPORTER_CPU_LIMIT=$STAGE_SSV_EXPORTER_CPU_LIMIT diff --git a/.k8/hetzner-stage/ssv-exporter-holesky.yml b/.k8/hetzner-stage/ssv-exporter-holesky.yml index ae5af5299c..10fb398390 100644 --- a/.k8/hetzner-stage/ssv-exporter-holesky.yml +++ b/.k8/hetzner-stage/ssv-exporter-holesky.yml @@ -51,6 +51,15 @@ spec: labels: app: ssv-exporter-holesky spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-exporter containers: - name: ssv-exporter-holesky image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG diff --git a/.k8/hetzner-stage/ssv-node-1-deployment.yml b/.k8/hetzner-stage/ssv-node-1-deployment.yml index c36df6e259..9b11ffbce6 100644 --- a/.k8/hetzner-stage/ssv-node-1-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-1-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-10-deployment.yml b/.k8/hetzner-stage/ssv-node-10-deployment.yml index 216e789152..051cf589d4 100644 --- a/.k8/hetzner-stage/ssv-node-10-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-10-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-11-deployment.yml b/.k8/hetzner-stage/ssv-node-11-deployment.yml index c3eb635410..e15bdb7b49 100644 --- a/.k8/hetzner-stage/ssv-node-11-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-11-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-12-deployment.yml b/.k8/hetzner-stage/ssv-node-12-deployment.yml index 81df024991..ebcc12a1ac 100644 --- a/.k8/hetzner-stage/ssv-node-12-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-12-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" 
+ value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-13-deployment.yml b/.k8/hetzner-stage/ssv-node-13-deployment.yml index b54177f184..53f1bae513 100644 --- a/.k8/hetzner-stage/ssv-node-13-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-13-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-14-deployment.yml b/.k8/hetzner-stage/ssv-node-14-deployment.yml index ca4aa3e735..65f47bc363 100644 --- a/.k8/hetzner-stage/ssv-node-14-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-14-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-15-deployment.yml b/.k8/hetzner-stage/ssv-node-15-deployment.yml index 00a87f3fde..ec59df9720 100644 --- a/.k8/hetzner-stage/ssv-node-15-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-15-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-16-deployment.yml b/.k8/hetzner-stage/ssv-node-16-deployment.yml index dfd2fd4645..f25f60b70c 100644 --- a/.k8/hetzner-stage/ssv-node-16-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-16-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-17-deployment.yml b/.k8/hetzner-stage/ssv-node-17-deployment.yml index ca58dcc94e..14561ef74c 100644 --- a/.k8/hetzner-stage/ssv-node-17-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-17-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-18-deployment.yml b/.k8/hetzner-stage/ssv-node-18-deployment.yml index 52dd6c3330..40ac470dd3 100644 --- a/.k8/hetzner-stage/ssv-node-18-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-18-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-19-deployment.yml b/.k8/hetzner-stage/ssv-node-19-deployment.yml index f60ef88662..a266c88e48 100644 --- a/.k8/hetzner-stage/ssv-node-19-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-19-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: 
"holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-2-deployment.yml b/.k8/hetzner-stage/ssv-node-2-deployment.yml index 1d66d1d863..f98472bdf2 100644 --- a/.k8/hetzner-stage/ssv-node-2-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-2-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-20-deployment.yml b/.k8/hetzner-stage/ssv-node-20-deployment.yml index 97eb182b6c..2e4cc9792d 100644 --- a/.k8/hetzner-stage/ssv-node-20-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-20-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-21-deployment.yml b/.k8/hetzner-stage/ssv-node-21-deployment.yml index f372232436..7e7a28c0fa 100644 --- a/.k8/hetzner-stage/ssv-node-21-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-21-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-22-deployment.yml b/.k8/hetzner-stage/ssv-node-22-deployment.yml index 06f8ed6abe..1459d26dc6 100644 --- a/.k8/hetzner-stage/ssv-node-22-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-22-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-23-deployment.yml b/.k8/hetzner-stage/ssv-node-23-deployment.yml index b9bee42755..a5eeac635c 100644 --- a/.k8/hetzner-stage/ssv-node-23-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-23-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-24-deployment.yml b/.k8/hetzner-stage/ssv-node-24-deployment.yml index b2afaef49b..5cb1e41b5f 100644 --- a/.k8/hetzner-stage/ssv-node-24-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-24-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-25-deployment.yml b/.k8/hetzner-stage/ssv-node-25-deployment.yml index 8b8f836456..ccd6e42cf2 100644 --- a/.k8/hetzner-stage/ssv-node-25-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-25-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: 
DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-26-deployment.yml b/.k8/hetzner-stage/ssv-node-26-deployment.yml index 80db29e49c..396e7360f1 100644 --- a/.k8/hetzner-stage/ssv-node-26-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-26-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-27-deployment.yml b/.k8/hetzner-stage/ssv-node-27-deployment.yml index 6353fcd60f..8674533272 100644 --- a/.k8/hetzner-stage/ssv-node-27-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-27-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-28-deployment.yml b/.k8/hetzner-stage/ssv-node-28-deployment.yml index da3457f71c..08712b773b 100644 --- a/.k8/hetzner-stage/ssv-node-28-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-28-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-29-deployment.yml b/.k8/hetzner-stage/ssv-node-29-deployment.yml index a225fc9d1e..acb427576c 100644 --- a/.k8/hetzner-stage/ssv-node-29-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-29-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-3-deployment.yml b/.k8/hetzner-stage/ssv-node-3-deployment.yml index 5727ef1a48..8486b720d0 100644 --- a/.k8/hetzner-stage/ssv-node-3-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-3-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-30-deployment.yml b/.k8/hetzner-stage/ssv-node-30-deployment.yml index 82f425d7c3..239bbc7302 100644 --- a/.k8/hetzner-stage/ssv-node-30-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-30-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-31-deployment.yml b/.k8/hetzner-stage/ssv-node-31-deployment.yml index 0daf1767e1..af78e460ce 100644 --- a/.k8/hetzner-stage/ssv-node-31-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-31-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: 
"false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-32-deployment.yml b/.k8/hetzner-stage/ssv-node-32-deployment.yml index 0bbba3fe2a..d6567ac81e 100644 --- a/.k8/hetzner-stage/ssv-node-32-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-32-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-33-deployment.yml b/.k8/hetzner-stage/ssv-node-33-deployment.yml index 2bc8160b1a..6b72d090df 100644 --- a/.k8/hetzner-stage/ssv-node-33-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-33-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-34-deployment.yml b/.k8/hetzner-stage/ssv-node-34-deployment.yml index f9bde91d1e..363b7b16d3 100644 --- a/.k8/hetzner-stage/ssv-node-34-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-34-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-35-deployment.yml b/.k8/hetzner-stage/ssv-node-35-deployment.yml index 37a070db61..0693b7da9d 100644 --- a/.k8/hetzner-stage/ssv-node-35-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-35-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-36-deployment.yml b/.k8/hetzner-stage/ssv-node-36-deployment.yml index 323b0bbf78..65a1566a23 100644 --- a/.k8/hetzner-stage/ssv-node-36-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-36-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-37-deployment.yml b/.k8/hetzner-stage/ssv-node-37-deployment.yml index dc10089edb..3c312c1560 100644 --- a/.k8/hetzner-stage/ssv-node-37-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-37-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-38-deployment.yml b/.k8/hetzner-stage/ssv-node-38-deployment.yml index 79b47cfc04..ba3e0dacb2 100644 --- a/.k8/hetzner-stage/ssv-node-38-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-38-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: 
METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-39-deployment.yml b/.k8/hetzner-stage/ssv-node-39-deployment.yml index 70fb3f419a..cef15eed57 100644 --- a/.k8/hetzner-stage/ssv-node-39-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-39-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-4-deployment.yml b/.k8/hetzner-stage/ssv-node-4-deployment.yml index 0e8185a2c8..758473cb70 100644 --- a/.k8/hetzner-stage/ssv-node-4-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-4-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-40-deployment.yml b/.k8/hetzner-stage/ssv-node-40-deployment.yml index d79178ef6f..022eded9fd 100644 --- a/.k8/hetzner-stage/ssv-node-40-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-40-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-41-deployment.yml b/.k8/hetzner-stage/ssv-node-41-deployment.yml index 98b7b4276d..b2fc6fcad1 100644 --- a/.k8/hetzner-stage/ssv-node-41-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-41-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-42-deployment.yml b/.k8/hetzner-stage/ssv-node-42-deployment.yml index 45c566db31..3664aeca45 100644 --- a/.k8/hetzner-stage/ssv-node-42-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-42-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-43-deployment.yml b/.k8/hetzner-stage/ssv-node-43-deployment.yml index 0866dde623..a9cd4f9b95 100644 --- a/.k8/hetzner-stage/ssv-node-43-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-43-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-44-deployment.yml b/.k8/hetzner-stage/ssv-node-44-deployment.yml index d1bb327963..01d0e22a17 100644 --- a/.k8/hetzner-stage/ssv-node-44-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-44-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff 
--git a/.k8/hetzner-stage/ssv-node-45-deployment.yml b/.k8/hetzner-stage/ssv-node-45-deployment.yml index 159d8c92ba..81c4760282 100644 --- a/.k8/hetzner-stage/ssv-node-45-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-45-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-46-deployment.yml b/.k8/hetzner-stage/ssv-node-46-deployment.yml index 269646ca17..57526b672c 100644 --- a/.k8/hetzner-stage/ssv-node-46-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-46-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-47-deployment.yml b/.k8/hetzner-stage/ssv-node-47-deployment.yml index 90b2018e04..8d832b2158 100644 --- a/.k8/hetzner-stage/ssv-node-47-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-47-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-48-deployment.yml b/.k8/hetzner-stage/ssv-node-48-deployment.yml index 227ae0f11a..3c6fcbc533 100644 --- a/.k8/hetzner-stage/ssv-node-48-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-48-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-49-deployment.yml b/.k8/hetzner-stage/ssv-node-49-deployment.yml index 2ecc568451..16c168c0c0 100644 --- a/.k8/hetzner-stage/ssv-node-49-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-49-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-5-deployment.yml b/.k8/hetzner-stage/ssv-node-5-deployment.yml index 041f35a3dc..0c4f294174 100644 --- a/.k8/hetzner-stage/ssv-node-5-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-5-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-50-deployment.yml b/.k8/hetzner-stage/ssv-node-50-deployment.yml index 5078410bbc..237964637e 100644 --- a/.k8/hetzner-stage/ssv-node-50-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-50-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git 
a/.k8/hetzner-stage/ssv-node-51-deployment.yml b/.k8/hetzner-stage/ssv-node-51-deployment.yml index 48664649e0..028ac33bde 100644 --- a/.k8/hetzner-stage/ssv-node-51-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-51-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-52-deployment.yml b/.k8/hetzner-stage/ssv-node-52-deployment.yml index 860a299915..9f2eb3d888 100644 --- a/.k8/hetzner-stage/ssv-node-52-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-52-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-53-deployment.yml b/.k8/hetzner-stage/ssv-node-53-deployment.yml index 5f7e0a001e..68515c515b 100644 --- a/.k8/hetzner-stage/ssv-node-53-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-53-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-54-deployment.yml b/.k8/hetzner-stage/ssv-node-54-deployment.yml index fc4e56e47d..9eb12dd56b 100644 --- a/.k8/hetzner-stage/ssv-node-54-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-54-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-55-deployment.yml b/.k8/hetzner-stage/ssv-node-55-deployment.yml index ab5df31101..05a109197b 100644 --- a/.k8/hetzner-stage/ssv-node-55-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-55-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-56-deployment.yml b/.k8/hetzner-stage/ssv-node-56-deployment.yml index 054a1d46e1..42c0c59b42 100644 --- a/.k8/hetzner-stage/ssv-node-56-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-56-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-57-deployment.yml b/.k8/hetzner-stage/ssv-node-57-deployment.yml index ea75a6043c..d2d8945516 100644 --- a/.k8/hetzner-stage/ssv-node-57-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-57-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git 
a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml index 049a3a3112..21401421dd 100644 --- a/.k8/hetzner-stage/ssv-node-58-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-59-deployment.yml b/.k8/hetzner-stage/ssv-node-59-deployment.yml index 1519c403d2..8cefa6ba7b 100644 --- a/.k8/hetzner-stage/ssv-node-59-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-59-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-6-deployment.yml b/.k8/hetzner-stage/ssv-node-6-deployment.yml index 8f04d5b19b..6eff03c297 100644 --- a/.k8/hetzner-stage/ssv-node-6-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-6-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-60-deployment.yml b/.k8/hetzner-stage/ssv-node-60-deployment.yml index a3362acda5..ca0b3dc8cd 100644 --- a/.k8/hetzner-stage/ssv-node-60-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-60-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-61-deployment.yml b/.k8/hetzner-stage/ssv-node-61-deployment.yml index c8ff819411..339c551727 100644 --- a/.k8/hetzner-stage/ssv-node-61-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-61-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-62-deployment.yml b/.k8/hetzner-stage/ssv-node-62-deployment.yml index 6d8d4a6ac6..531005618a 100644 --- a/.k8/hetzner-stage/ssv-node-62-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-62-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-63-deployment.yml b/.k8/hetzner-stage/ssv-node-63-deployment.yml index a98de28b65..39e261a3bf 100644 --- a/.k8/hetzner-stage/ssv-node-63-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-63-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git 
a/.k8/hetzner-stage/ssv-node-64-deployment.yml b/.k8/hetzner-stage/ssv-node-64-deployment.yml index 9baafd765a..709fc026fa 100644 --- a/.k8/hetzner-stage/ssv-node-64-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-64-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-65-deployment.yml b/.k8/hetzner-stage/ssv-node-65-deployment.yml index cd3cbfeed6..7872f5efef 100644 --- a/.k8/hetzner-stage/ssv-node-65-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-65-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-66-deployment.yml b/.k8/hetzner-stage/ssv-node-66-deployment.yml index b8888cb2ae..8cf3d90cfe 100644 --- a/.k8/hetzner-stage/ssv-node-66-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-66-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-67-deployment.yml b/.k8/hetzner-stage/ssv-node-67-deployment.yml index 0f5a78da4a..b9620a8b44 100644 --- a/.k8/hetzner-stage/ssv-node-67-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-67-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-68-deployment.yml b/.k8/hetzner-stage/ssv-node-68-deployment.yml index 2a72b67d40..b7252d580e 100644 --- a/.k8/hetzner-stage/ssv-node-68-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-68-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-69-deployment.yml b/.k8/hetzner-stage/ssv-node-69-deployment.yml index 2a3a6f05fd..6372ddf492 100644 --- a/.k8/hetzner-stage/ssv-node-69-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-69-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-7-deployment.yml b/.k8/hetzner-stage/ssv-node-7-deployment.yml index 1e3d450f6e..49101753c1 100644 --- a/.k8/hetzner-stage/ssv-node-7-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-7-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git 
a/.k8/hetzner-stage/ssv-node-70-deployment.yml b/.k8/hetzner-stage/ssv-node-70-deployment.yml index 8d8caa0823..d9cb6b3604 100644 --- a/.k8/hetzner-stage/ssv-node-70-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-70-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-71-deployment.yml b/.k8/hetzner-stage/ssv-node-71-deployment.yml index 919749db9b..cde1e7cd7e 100644 --- a/.k8/hetzner-stage/ssv-node-71-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-71-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-72-deployment.yml b/.k8/hetzner-stage/ssv-node-72-deployment.yml index 73dae0d45f..11b639df29 100644 --- a/.k8/hetzner-stage/ssv-node-72-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-72-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-8-deployment.yml b/.k8/hetzner-stage/ssv-node-8-deployment.yml index 37f3352ea8..a08bd81e24 100644 --- a/.k8/hetzner-stage/ssv-node-8-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-8-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/hetzner-stage/ssv-node-9-deployment.yml b/.k8/hetzner-stage/ssv-node-9-deployment.yml index 3904816442..1dc1e6c2a3 100644 --- a/.k8/hetzner-stage/ssv-node-9-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-9-deployment.yml @@ -98,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/scripts/deploy-cluster-13--16.sh b/.k8/stage/scripts/deploy-cluster-13--16.sh deleted file mode 100755 index 9b3772bdfe..0000000000 --- a/.k8/stage/scripts/deploy-cluster-13--16.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash - -set -x - -if [[ -z $1 ]]; then - echo "Please provide DOCKERREPO" - exit 1 -fi - -if [[ -z $2 ]]; then - echo "Please provide IMAGETAG" - exit 1 -fi - -if [[ -z $3 ]]; then - echo "Please provide NAMESPACE" - exit 1 -fi - -if [[ -z $4 ]]; then - echo "Please provide number of replicas" - exit 1 -fi - -if [[ -z $5 ]]; then - echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" - exit 1 -fi - -if [[ -z $6 ]]; then - echo "Please provide k8s context" - exit 1 -fi - -if [[ -z $7 ]]; then - echo "Pleae provide domain suffix" - exit 1 -fi - -if [[ -z ${8} ]]; then - echo "Please provide k8s app version" - exit 1 -fi - -if [[ -z $9 ]]; then - echo "Please provide health check image" - exit 1 -fi - -if [[ -z ${10} ]]; then - echo "Please provide nodes cpu limit" - exit 1 -fi 
- -if [[ -z ${11} ]]; then - echo "Please provide nodes mem limit" - exit 1 -fi - - -DOCKERREPO=$1 -IMAGETAG=$2 -NAMESPACE=$3 -REPLICAS=$4 -DEPL_TYPE=$5 -K8S_CONTEXT=$6 -DOMAIN_SUFFIX=$7 -K8S_API_VERSION=$8 -HEALTH_CHECK_IMAGE=$9 -NODES_CPU_LIMIT=${10} -NODES_MEM_LIMIT=${11} - - -echo $DOCKERREPO -echo $IMAGETAG -echo $NAMESPACE -echo $REPLICAS -echo $DEPL_TYPE -echo $K8S_CONTEXT -echo $DOMAIN_SUFFIX -echo $K8S_API_VERSION -echo $HEALTH_CHECK_IMAGE -echo $NODES_CPU_LIMIT -echo $NODES_MEM_LIMIT - -# create namespace if not exists -if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then - echo "$NAMESPACE created" - kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE -fi - -#config -#if [[ -d .k8/configmaps/ ]]; then -#config - #for file in $(ls -A1 .k8/configmaps/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" - #done -#fi - -#if [[ -d .k8/secrets/ ]]; then - #for file in $(ls -A1 .k8/secrets/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" - #done -#fi - -DIR=".k8/stage" -DEPLOY_FILES=( - "ssv-node-v3-1-deployment.yml" - "ssv-node-v3-2-deployment.yml" - "ssv-node-v3-3-deployment.yml" - "ssv-node-v3-4-deployment.yml" -) - -if [[ -d $DIR ]]; then - for file in "${DEPLOY_FILES[@]}"; do - sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ - -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ - -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ - -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ - -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ - -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ - -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ - -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 - done -fi - -#deploy -for file in "${DEPLOY_FILES[@]}"; do - kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 -done diff --git a/.k8/stage/scripts/deploy-cluster-9--12.sh b/.k8/stage/scripts/deploy-cluster-9--12.sh deleted file mode 100755 index 057b7205af..0000000000 --- a/.k8/stage/scripts/deploy-cluster-9--12.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash - -set -x - -if [[ -z $1 ]]; then - echo "Please provide DOCKERREPO" - exit 1 -fi - -if [[ -z $2 ]]; then - echo "Please provide IMAGETAG" - exit 1 -fi - -if [[ -z $3 ]]; then - echo "Please provide NAMESPACE" - exit 1 -fi - -if [[ -z $4 ]]; then - echo "Please provide number of replicas" - exit 1 -fi - -if [[ -z $5 ]]; then - echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" - exit 1 -fi - -if [[ -z $6 ]]; then - echo "Please provide k8s context" - exit 1 -fi - -if [[ -z $7 ]]; then - echo "Pleae provide domain suffix" - exit 1 -fi - -if [[ -z ${8} ]]; then - echo "Please provide k8s app version" - exit 1 -fi - -if [[ -z $9 ]]; then - echo "Please provide health check image" - exit 1 -fi - -if [[ -z ${10} ]]; then - echo "Please provide nodes cpu limit" - exit 1 -fi - -if [[ -z ${11} ]]; then - echo "Please provide nodes mem limit" - exit 1 -fi - - -DOCKERREPO=$1 -IMAGETAG=$2 -NAMESPACE=$3 -REPLICAS=$4 -DEPL_TYPE=$5 -K8S_CONTEXT=$6 -DOMAIN_SUFFIX=$7 -K8S_API_VERSION=$8 -HEALTH_CHECK_IMAGE=$9 -NODES_CPU_LIMIT=${10} -NODES_MEM_LIMIT=${11} - - -echo $DOCKERREPO -echo $IMAGETAG -echo $NAMESPACE -echo $REPLICAS -echo $DEPL_TYPE -echo $K8S_CONTEXT -echo $DOMAIN_SUFFIX -echo $K8S_API_VERSION -echo $HEALTH_CHECK_IMAGE -echo $NODES_CPU_LIMIT -echo $NODES_MEM_LIMIT - -# create namespace if not exists -if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then - echo "$NAMESPACE created" - kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE -fi - -#config -#if [[ -d .k8/configmaps/ ]]; then -#config - #for file in $(ls -A1 .k8/configmaps/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" - #done -#fi - -#if [[ -d .k8/secrets/ ]]; then - #for file in $(ls -A1 .k8/secrets/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" - #done -#fi - -DIR=".k8/stage" -DEPLOY_FILES=( - "ssv-node-9-deployment.yml" - "ssv-node-10-deployment.yml" - "ssv-node-11-deployment.yml" - "ssv-node-12-deployment.yml" -) - -if [[ -d $DIR ]]; then - for file in "${DEPLOY_FILES[@]}"; do - sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ - -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ - -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ - -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ - -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ - -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ - -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ - -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 - done -fi - -#deploy -for file in "${DEPLOY_FILES[@]}"; do - kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 -done diff --git a/.k8/stage/scripts/deploy-holesky-exporters.sh b/.k8/stage/scripts/deploy-holesky-exporters.sh index c384e72012..9a899ef3d3 100755 --- a/.k8/stage/scripts/deploy-holesky-exporters.sh +++ b/.k8/stage/scripts/deploy-holesky-exporters.sh @@ -80,7 +80,7 @@ if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE fi -DIR=".k8/stage" +DIR=".k8/hetzner-stage" DEPLOY_FILES=( "ssv-exporter-holesky.yml" ) @@ -101,4 +101,4 @@ fi #deploy for file in "${DEPLOY_FILES[@]}"; do kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 -done \ No newline at end of file +done diff --git a/.k8/stage/ssv-exporter-holesky.yml b/.k8/stage/ssv-exporter-holesky.yml deleted file mode 100644 index a665b13ae5..0000000000 --- a/.k8/stage/ssv-exporter-holesky.yml +++ /dev/null @@ -1,187 +0,0 @@ ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: ssv-exporter-holesky - namespace: REPLACE_NAMESPACE -spec: - hosts: - - "ws-exporter-holesky.REPLACE_DOMAIN_SUFFIX" - gateways: - - ssv-exporter-holesky - http: - - route: - - destination: - host: ssv-exporter-holesky - port: - number: 14013 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: ssv-exporter-holesky - namespace: REPLACE_NAMESPACE -spec: - selector: - istio: ingressgateway-int - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "ws-exporter-holesky.REPLACE_DOMAIN_SUFFIX" ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-exporter-holesky - namespace: REPLACE_NAMESPACE - labels: - app: ssv-exporter-holesky -spec: - type: ClusterIP - ports: - - port: 12013 - protocol: UDP - targetPort: 12013 - name: port-12013 - - port: 13013 - protocol: TCP - targetPort: 13013 - name: port-13013 - - port: 14013 - protocol: TCP - targetPort: 14013 - name: port-14013 - - port: 15013 - protocol: TCP - targetPort: 15013 - name: port-15013 - - port: 16013 - protocol: TCP - targetPort: 16013 - name: port-16013 - selector: - app: ssv-exporter-holesky ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-exporter-holesky - name: 
ssv-exporter-holesky - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-exporter-holesky - template: - metadata: - labels: - app: ssv-exporter-holesky - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-exporter-holesky - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_EXPORTER_CPU_LIMIT - memory: REPLACE_EXPORTER_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12013 - name: port-12013 - hostPort: 12013 - protocol: UDP - - containerPort: 13013 - name: port-13013 - hostPort: 13013 - - containerPort: 14013 - name: port-14013 - hostPort: 14013 - - containerPort: 15013 - name: port-15013 - hostPort: 15013 - - containerPort: 16013 - name: port-16013 - hostPort: 16013 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv.*" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15013" - - name: SSV_API_PORT - value: "16013" - - name: ENABLE_PROFILE - value: "true" - - name: UDP_PORT - value: "12013" - - name: TCP_PORT - value: "13003" - - name: WS_API_PORT - value: "14013" - - name: FULLNODE - value: "true" - - name: EXPORTER - value: "true" - - name: DISCOVERY_TRACE - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: SUBNETS - value: "0xffffffffffffffffffffffffffffffff" - volumeMounts: - - mountPath: /data - name: ssv-exporter-holesky - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-exporter-holesky-cm - volumes: - - name: ssv-exporter-holesky - persistentVolumeClaim: - claimName: ssv-exporter-holesky - - name: ssv-exporter-holesky-cm - configMap: - name: ssv-exporter-holesky-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-10-deployment.yml b/.k8/stage/ssv-node-10-deployment.yml deleted file mode 100644 index ce73488cf3..0000000000 --- a/.k8/stage/ssv-node-10-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-10-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-10 -spec: - type: ClusterIP - ports: - - port: 12010 - protocol: UDP - targetPort: 12010 - name: port-12010 - - port: 13010 - protocol: TCP - targetPort: 13010 - name: port-13010 - - port: 15010 - protocol: TCP - targetPort: 15010 - name: port-15010 - - port: 16010 - protocol: TCP - targetPort: 16010 - name: port-16010 - selector: - app: ssv-node-10 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-10 - name: ssv-node-10 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-10 - template: - metadata: - labels: - app: ssv-node-10 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - 
matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-10 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12010 - name: port-12010 - protocol: UDP - hostPort: 12010 - - containerPort: 13010 - name: port-13010 - hostPort: 13010 - - containerPort: 15010 - name: port-15010 - hostPort: 15010 - - containerPort: 16010 - name: port-16010 - hostPort: 16010 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15010" - - name: SSV_API_PORT - value: "16010" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-10 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-10 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-10 - persistentVolumeClaim: - claimName: ssv-node-10 - - name: ssv-cm-validator-options-10 - configMap: - name: ssv-cm-validator-options-10 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-11-deployment.yml b/.k8/stage/ssv-node-11-deployment.yml deleted file mode 100644 index 2bddd3cdeb..0000000000 --- a/.k8/stage/ssv-node-11-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-11-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-11 -spec: - type: ClusterIP - ports: - - port: 12011 - protocol: UDP - targetPort: 12011 - name: port-12011 - - port: 13011 - protocol: TCP - targetPort: 13011 - name: port-13011 - - port: 15011 - protocol: TCP - targetPort: 15011 - name: port-15011 - - port: 16011 - protocol: TCP - targetPort: 16011 - name: port-16011 - selector: - app: ssv-node-11 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-11 - name: ssv-node-11 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-11 - template: - metadata: - labels: - app: ssv-node-11 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - 
- name: ssv-node-11 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12011 - name: port-12011 - protocol: UDP - hostPort: 12011 - - containerPort: 13011 - name: port-13011 - hostPort: 13011 - - containerPort: 15011 - name: port-15011 - hostPort: 15011 - - containerPort: 16011 - name: port-16011 - hostPort: 16011 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15011" - - name: SSV_API_PORT - value: "16011" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-11 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-11 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-11 - persistentVolumeClaim: - claimName: ssv-node-11 - - name: ssv-cm-validator-options-11 - configMap: - name: ssv-cm-validator-options-11 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-12-deployment.yml b/.k8/stage/ssv-node-12-deployment.yml deleted file mode 100644 index f06afa878f..0000000000 --- a/.k8/stage/ssv-node-12-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-12-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-12 -spec: - type: ClusterIP - ports: - - port: 12012 - protocol: UDP - targetPort: 12012 - name: port-12012 - - port: 13012 - protocol: TCP - targetPort: 13012 - name: port-13012 - - port: 15012 - protocol: TCP - targetPort: 15012 - name: port-15012 - - port: 16012 - protocol: TCP - targetPort: 16012 - name: port-16012 - selector: - app: ssv-node-12 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-12 - name: ssv-node-12 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-12 - template: - metadata: - labels: - app: ssv-node-12 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-12 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - 
resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12012 - name: port-12012 - protocol: UDP - hostPort: 12012 - - containerPort: 13012 - name: port-13012 - hostPort: 13012 - - containerPort: 15012 - name: port-15012 - hostPort: 15012 - - containerPort: 16012 - name: port-16012 - hostPort: 16012 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15012" - - name: SSV_API_PORT - value: "16012" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-12 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-12 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-12 - persistentVolumeClaim: - claimName: ssv-node-12 - - name: ssv-cm-validator-options-12 - configMap: - name: ssv-cm-validator-options-12 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-9-deployment.yml b/.k8/stage/ssv-node-9-deployment.yml deleted file mode 100644 index 37098e1a0a..0000000000 --- a/.k8/stage/ssv-node-9-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-9-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-9 -spec: - type: ClusterIP - ports: - - port: 12009 - protocol: UDP - targetPort: 12009 - name: port-12009 - - port: 13009 - protocol: TCP - targetPort: 13009 - name: port-13009 - - port: 15009 - protocol: TCP - targetPort: 15009 - name: port-15009 - - port: 16009 - protocol: TCP - targetPort: 16009 - name: port-16009 - selector: - app: ssv-node-9 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-9 - name: ssv-node-9 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-9 - template: - metadata: - labels: - app: ssv-node-9 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-9 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", 
"start-node"] - ports: - - containerPort: 12009 - name: port-12009 - protocol: UDP - hostPort: 12009 - - containerPort: 13009 - name: port-13009 - hostPort: 13009 - - containerPort: 15009 - name: port-15009 - hostPort: 15009 - - containerPort: 16009 - name: port-16009 - hostPort: 16009 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15009" - - name: SSV_API_PORT - value: "16009" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-9 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-9 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-9 - persistentVolumeClaim: - claimName: ssv-node-9 - - name: ssv-cm-validator-options-9 - configMap: - name: ssv-cm-validator-options-9 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-5-deployment.yml b/.k8/stage/ssv-node-v2-5-deployment.yml index 70a7f89f72..c7446bafdf 100644 --- a/.k8/stage/ssv-node-v2-5-deployment.yml +++ b/.k8/stage/ssv-node-v2-5-deployment.yml @@ -104,9 +104,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-6-deployment.yml b/.k8/stage/ssv-node-v2-6-deployment.yml index 1fddf2a098..b56673db9e 100644 --- a/.k8/stage/ssv-node-v2-6-deployment.yml +++ b/.k8/stage/ssv-node-v2-6-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-7-deployment.yml b/.k8/stage/ssv-node-v2-7-deployment.yml index bb3488b41d..4e61986511 100644 --- a/.k8/stage/ssv-node-v2-7-deployment.yml +++ b/.k8/stage/ssv-node-v2-7-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-8-deployment.yml 
b/.k8/stage/ssv-node-v2-8-deployment.yml index af3607ba5c..745fb3a3ea 100644 --- a/.k8/stage/ssv-node-v2-8-deployment.yml +++ b/.k8/stage/ssv-node-v2-8-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v3-1-deployment.yml b/.k8/stage/ssv-node-v3-1-deployment.yml deleted file mode 100644 index 59eeab296a..0000000000 --- a/.k8/stage/ssv-node-v3-1-deployment.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v3-1-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v3-1 -spec: - type: ClusterIP - ports: - - port: 12301 - protocol: UDP - targetPort: 12301 - name: port-12301 - - port: 13301 - protocol: TCP - targetPort: 13301 - name: port-13301 - - port: 15301 - protocol: TCP - targetPort: 15301 - name: port-15301 - - port: 16301 - protocol: TCP - targetPort: 16301 - name: port-16301 - - port: 16301 - protocol: TCP - targetPort: 16301 - name: port-16301 - selector: - app: ssv-node-v3-1 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v3-1 - name: ssv-node-v3-1 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v3-1 - template: - metadata: - labels: - app: ssv-node-v3-1 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v3-1 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12301 - name: port-12301 - protocol: UDP - hostPort: 12301 - - containerPort: 13301 - name: port-13301 - hostPort: 13301 - - containerPort: 15301 - name: port-15301 - hostPort: 15301 - - containerPort: 16301 - name: port-16301 - hostPort: 16301 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: LOG_LEVEL - value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: METRICS_API_PORT - value: "15301" - - name: SSV_API_PORT - value: "16301" - - name: ENABLE_PROFILE - value: "true" - - name: WS_API_PORT - value: "16301" - - name: FULLNODE - value: "true" - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v3-1 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-v3-1 - volumes: - - name: ssv-node-v3-1 - persistentVolumeClaim: - claimName: ssv-node-v3-1 - - name: ssv-cm-validator-options-v3-1 - configMap: - name: ssv-cm-validator-options-v3-1 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-2-deployment.yml b/.k8/stage/ssv-node-v3-2-deployment.yml deleted file mode 100644 index 2daed3c6a8..0000000000 --- a/.k8/stage/ssv-node-v3-2-deployment.yml +++ /dev/null @@ -1,141 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v3-2-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v3-2 -spec: - type: ClusterIP - ports: - - port: 12302 - protocol: UDP - targetPort: 12302 - name: port-12302 - - port: 13302 - protocol: TCP - targetPort: 13302 - name: port-13302 - - port: 15302 - protocol: TCP - targetPort: 15302 - name: port-15302 - - port: 16302 - protocol: TCP - targetPort: 16302 - name: port-16302 - selector: - app: ssv-node-v3-2 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v3-2 - name: ssv-node-v3-2 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v3-2 - template: - metadata: - labels: - app: ssv-node-v3-2 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v3-2 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12302 - name: port-12302 - protocol: UDP - hostPort: 12302 - - containerPort: 13302 - name: port-13302 - hostPort: 13302 - - containerPort: 15302 - name: port-15302 - hostPort: 15302 - - containerPort: 16302 - name: port-16302 - hostPort: 16302 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: LOG_LEVEL - value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: METRICS_API_PORT - value: "15302" - - name: SSV_API_PORT - value: "16302" - - name: ENABLE_PROFILE - value: "true" - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v3-2 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-v3-2 - volumes: - - name: ssv-node-v3-2 - persistentVolumeClaim: - claimName: ssv-node-v3-2 - - name: ssv-cm-validator-options-v3-2 - configMap: - name: ssv-cm-validator-options-v3-2 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-3-deployment.yml b/.k8/stage/ssv-node-v3-3-deployment.yml deleted file mode 100644 index 64bfbbe759..0000000000 --- a/.k8/stage/ssv-node-v3-3-deployment.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v3-3-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v3-3 -spec: - type: ClusterIP - ports: - - port: 12303 - protocol: UDP - targetPort: 12303 - name: port-12303 - - port: 13303 - protocol: TCP - targetPort: 13303 - name: port-13303 - - port: 15303 - protocol: TCP - targetPort: 15303 - name: port-15303 - - port: 16303 - protocol: TCP - targetPort: 16303 - name: port-16303 - - port: 16303 - protocol: TCP - targetPort: 16303 - name: port-16303 - selector: - app: ssv-node-v3-3 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v3-3 - name: ssv-node-v3-3 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v3-3 - template: - metadata: - labels: - app: ssv-node-v3-3 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v3-3 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12303 - name: port-12303 - protocol: UDP - hostPort: 12303 - - containerPort: 13303 - name: port-13303 - hostPort: 13303 - - containerPort: 15303 - name: port-15303 - hostPort: 15303 - - containerPort: 16303 - name: port-16303 - hostPort: 16303 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: LOG_LEVEL - value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: METRICS_API_PORT - value: "15303" - - name: SSV_API_PORT - value: "16303" - - name: ENABLE_PROFILE - value: "true" - - name: WS_API_PORT - value: "16303" - - name: FULLNODE - value: "true" - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v3-3 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-v3-3 - volumes: - - name: ssv-node-v3-3 - persistentVolumeClaim: - claimName: ssv-node-v3-3 - - name: ssv-cm-validator-options-v3-3 - configMap: - name: ssv-cm-validator-options-v3-3 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-4-deployment.yml b/.k8/stage/ssv-node-v3-4-deployment.yml deleted file mode 100644 index b13efb4f5c..0000000000 --- a/.k8/stage/ssv-node-v3-4-deployment.yml +++ /dev/null @@ -1,141 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v3-4-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v3-4 -spec: - type: ClusterIP - ports: - - port: 12304 - protocol: UDP - targetPort: 12304 - name: port-12304 - - port: 13304 - protocol: TCP - targetPort: 13304 - name: port-13304 - - port: 15304 - protocol: TCP - targetPort: 15304 - name: port-15304 - - port: 16304 - protocol: TCP - targetPort: 16304 - name: port-16304 - selector: - app: ssv-node-v3-4 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v3-4 - name: ssv-node-v3-4 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v3-4 - template: - metadata: - labels: - app: ssv-node-v3-4 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v3-4 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12304 - name: port-12304 - protocol: UDP - hostPort: 12304 - - containerPort: 13304 - name: port-13304 - hostPort: 13304 - - containerPort: 15304 - name: port-15304 - hostPort: 15304 - - containerPort: 16304 - name: port-16304 - hostPort: 16304 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: LOG_LEVEL - value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: METRICS_API_PORT - value: "15304" - - name: SSV_API_PORT - value: "16304" - - name: ENABLE_PROFILE - value: "true" - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v3-4 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-v3-4 - volumes: - - name: ssv-node-v3-4 - persistentVolumeClaim: - claimName: ssv-node-v3-4 - - name: ssv-cm-validator-options-v3-4 - configMap: - name: ssv-cm-validator-options-v3-4 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true From 314ce4b6940485da87b49121a75c388f03390073 Mon Sep 17 00:00:00 2001 From: rehs0y Date: Sun, 29 Oct 2023 16:18:47 +0200 Subject: [PATCH 23/54] validator registration fix(#1179) * add logs and fix validator registrations. * chore: deploy BUILDER_PROPOSALS to mainnet --------- Co-authored-by: moshe-blox --- .../mainnet/ssv-node-mainnet-1-deployment.yml | 2 ++ .../mainnet/ssv-node-mainnet-2-deployment.yml | 2 ++ .../mainnet/ssv-node-mainnet-3-deployment.yml | 2 ++ .../mainnet/ssv-node-mainnet-4-deployment.yml | 2 ++ operator/duties/validatorregistration.go | 22 ++++++------------- operator/validator/controller.go | 2 +- .../v2/ssv/runner/validator_registration.go | 5 ++++- 7 files changed, 20 insertions(+), 17 deletions(-) diff --git a/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml index 7f2616196a..49f3ad5f29 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16017" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-1 diff --git a/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml index 2484e7c214..8cfa6f3d6b 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16018" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-2 diff --git a/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml index 2b0b836915..1b197f8f51 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16019" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-3 diff --git a/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml index 048e021889..5e83a864eb 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16020" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: 
/data name: ssv-node-mainnet-4 diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index 8a3771f5af..e8b6b79210 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -6,22 +6,16 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "go.uber.org/zap" - - "github.com/bloxapp/ssv/logging/fields" ) const validatorRegistrationEpochInterval = uint64(10) type ValidatorRegistrationHandler struct { baseHandler - - validatorsPassedFirstRegistration map[string]struct{} } func NewValidatorRegistrationHandler() *ValidatorRegistrationHandler { - return &ValidatorRegistrationHandler{ - validatorsPassedFirstRegistration: map[string]struct{}{}, - } + return &ValidatorRegistrationHandler{} } func (h *ValidatorRegistrationHandler) Name() string { @@ -39,8 +33,8 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { case <-h.ticker.Next(): slot := h.ticker.Slot() shares := h.validatorController.GetOperatorShares() - sent := 0 + validators := []phase0.ValidatorIndex{} for _, share := range shares { if !share.HasBeaconMetadata() || !share.BeaconMetadata.IsAttesting() { continue @@ -48,10 +42,7 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { // if not passed first registration, should be registered within one epoch time in a corresponding slot // if passed first registration, should be registered within validatorRegistrationEpochInterval epochs time in a corresponding slot - registrationSlotInterval := h.network.SlotsPerEpoch() - if _, ok := h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)]; ok { - registrationSlotInterval *= validatorRegistrationEpochInterval - } + registrationSlotInterval := h.network.SlotsPerEpoch() * validatorRegistrationEpochInterval if uint64(share.BeaconMetadata.Index)%registrationSlotInterval != uint64(slot)%registrationSlotInterval { continue @@ -67,10 +58,11 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { // no need for other params }}) - sent++ - h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)] = struct{}{} + validators = append(validators, share.BeaconMetadata.Index) } - h.logger.Debug("validator registration duties sent", zap.Uint64("slot", uint64(slot)), fields.Count(sent)) + h.logger.Debug("validator registration duties sent", + zap.Uint64("slot", uint64(slot)), + zap.Any("validators", validators)) case <-h.indicesChange: continue diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 1b45e627e1..604e1fbb55 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -856,7 +856,7 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) case spectypes.BNRoleValidatorRegistration: qbftCtrl := buildController(spectypes.BNRoleValidatorRegistration, nil) - runners[role] = runner.NewValidatorRegistrationRunner(spectypes.PraterNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) + runners[role] = runner.NewValidatorRegistrationRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) } } return runners diff 
--git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index 8cbf53fab2..68bc4351b8 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -2,6 +2,7 @@ package runner import ( "crypto/sha256" + "encoding/hex" "encoding/json" v1 "github.com/attestantio/go-eth2-client/api/v1" @@ -91,7 +92,9 @@ func (r *ValidatorRegistrationRunner) ProcessPreConsensus(logger *zap.Logger, si return errors.Wrap(err, "could not submit validator registration") } - logger.Debug("validator registration submitted successfully", fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:])) + logger.Debug("validator registration submitted successfully", + fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:]), + zap.String("signature", hex.EncodeToString(specSig[:]))) r.GetState().Finished = true return nil From 1d73ed03857cae67b1e6e59d7fdc925867776102 Mon Sep 17 00:00:00 2001 From: moshe-blox Date: Mon, 30 Oct 2023 11:56:50 +0200 Subject: [PATCH 24/54] merge from stage --- .gitlab-ci.yml | 60 + .../scripts/deploy-cluster-1--4.sh | 131 + .../scripts/deploy-cluster-13--16.sh | 10 +- .../scripts/deploy-cluster-17--20.sh | 131 + .../scripts/deploy-cluster-21--24.sh | 131 + .../scripts/deploy-cluster-25--28.sh | 131 + .../scripts/deploy-cluster-29--32.sh | 131 + .../scripts/deploy-cluster-33--36.sh | 131 + .../scripts/deploy-cluster-37--40.sh | 131 + .../scripts/deploy-cluster-41--44.sh | 131 + .../scripts/deploy-cluster-45--48.sh | 131 + .../scripts/deploy-cluster-49--52.sh | 131 + .../scripts/deploy-cluster-5--8.sh | 131 + .../scripts/deploy-cluster-53--56.sh | 131 + .../scripts/deploy-cluster-57--60.sh | 131 + .../scripts/deploy-cluster-61--64.sh | 131 + .../scripts/deploy-cluster-65--68.sh | 131 + .../scripts/deploy-cluster-69--72.sh | 131 + .../scripts/deploy-cluster-9--12.sh | 2 +- .../scripts/deploy-holesky-exporters.sh | 104 + .k8/hetzner-stage/ssv-exporter-holesky.yml | 152 ++ .k8/hetzner-stage/ssv-node-1-deployment.yml | 133 + .../ssv-node-10-deployment.yml | 46 +- .../ssv-node-11-deployment.yml | 46 +- .../ssv-node-12-deployment.yml | 46 +- .k8/hetzner-stage/ssv-node-13-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-14-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-15-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-16-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-17-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-18-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-19-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-2-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-20-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-21-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-22-deployment.yml | 133 + .../ssv-node-23-deployment.yml} | 124 +- .../ssv-node-24-deployment.yml} | 116 +- .../ssv-node-25-deployment.yml} | 124 +- .../ssv-node-26-deployment.yml} | 116 +- .k8/hetzner-stage/ssv-node-27-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-28-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-29-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-3-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-30-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-31-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-32-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-33-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-34-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-35-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-36-deployment.yml | 133 + 
.k8/hetzner-stage/ssv-node-37-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-38-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-39-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-4-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-40-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-41-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-42-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-43-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-44-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-45-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-46-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-47-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-48-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-49-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-5-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-50-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-51-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-52-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-53-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-54-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-55-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-56-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-57-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-58-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-59-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-6-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-60-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-61-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-62-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-63-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-64-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-65-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-66-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-67-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-68-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-69-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-7-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-70-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-71-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-72-deployment.yml | 133 + .k8/hetzner-stage/ssv-node-8-deployment.yml | 133 + .../ssv-node-9-deployment.yml | 46 +- .../mainnet/ssv-node-mainnet-1-deployment.yml | 2 + .../mainnet/ssv-node-mainnet-2-deployment.yml | 2 + .../mainnet/ssv-node-mainnet-3-deployment.yml | 2 + .../mainnet/ssv-node-mainnet-4-deployment.yml | 2 + .k8/stage/scripts/deploy-holesky-exporters.sh | 104 + .k8/stage/ssv-node-v2-1-deployment.yml | 4 +- .k8/stage/ssv-node-v2-2-deployment.yml | 4 +- .k8/stage/ssv-node-v2-3-deployment.yml | 4 +- .k8/stage/ssv-node-v2-4-deployment.yml | 4 +- .k8/stage/ssv-node-v2-5-deployment.yml | 6 +- .k8/stage/ssv-node-v2-6-deployment.yml | 4 +- .k8/stage/ssv-node-v2-7-deployment.yml | 6 +- .k8/stage/ssv-node-v2-8-deployment.yml | 4 +- Dockerfile | 3 +- beacon/goclient/goclient.go | 10 +- beacon/goclient/proposer.go | 13 +- beacon/goclient/voluntary_exit.go | 10 + cli/operator/node.go | 91 +- docs/OPERATOR_GETTING_STARTED.md | 3 +- ekm/eth_key_manager_signer.go | 163 +- ekm/signer_key_manager_test.go | 55 +- ekm/signer_storage.go | 14 +- eth/ethtest/cluster_liquidated_test.go | 91 + eth/ethtest/cluster_reactivated_test.go | 87 + eth/ethtest/common_test.go | 231 ++ eth/ethtest/eth_e2e_test.go | 309 +++ eth/ethtest/operator_added_test.go | 86 + eth/ethtest/operator_removed_test.go | 83 + eth/ethtest/set_fee_recipient_test.go | 80 + eth/ethtest/utils_test.go | 300 +++ eth/ethtest/validator_added_test.go | 134 + 
eth/ethtest/validator_removed_test.go | 104 + eth/eventhandler/event_handler.go | 12 +- eth/eventhandler/event_handler_test.go | 882 ++++++- eth/eventhandler/handlers.go | 51 +- eth/eventhandler/local_events_test.go | 6 +- eth/eventhandler/task.go | 43 +- eth/eventhandler/task_executor_test.go | 13 +- eth/eventsyncer/event_syncer_test.go | 2 + eth/executionclient/execution_client_test.go | 14 +- eth/simulator/simcontract/simcontract.go | 2 +- eth/simulator/simcontract/simcontract.sol | 33 +- go.mod | 7 +- go.sum | 8 +- integration/qbft/tests/msg_router.go | 15 +- integration/qbft/tests/round_change_test.go | 9 +- integration/qbft/tests/scenario_test.go | 16 +- logging/fields/fields.go | 5 + logging/names.go | 1 + logging/testing.go | 5 +- message/validation/consensus_validation.go | 434 ++++ .../validation/consensus_validation_test.go | 104 + message/validation/errors.go | 100 + message/validation/message_counts.go | 156 ++ message/validation/metrics.go | 38 + message/validation/partial_validation.go | 251 ++ message/validation/qbft_config.go | 53 + message/validation/signer_state.go | 45 + message/validation/validation.go | 556 +++++ message/validation/validation_test.go | 1774 ++++++++++++++ migrations/migration_2_encrypt_shares.go | 1 + .../grafana/dashboard_msg_validation.json | 2175 +++++++++++++++++ monitoring/grafana/dashboard_ssv_node.json | 291 ++- .../dashboard_ssv_operator_performance.json | 20 +- .../metricsreporter/metrics_reporter.go | 239 +- network/network.go | 6 +- network/p2p/config.go | 6 + network/p2p/p2p.go | 34 +- network/p2p/p2p_pubsub.go | 25 +- network/p2p/p2p_setup.go | 18 +- network/p2p/p2p_sync.go | 126 +- network/p2p/p2p_test.go | 57 +- network/p2p/test_utils.go | 3 + network/syncing/concurrent.go | 189 -- network/syncing/concurrent_test.go | 148 -- network/syncing/mocks/syncer.go | 127 - network/syncing/syncer.go | 207 -- network/syncing/syncer_test.go | 34 - network/topics/controller.go | 48 +- network/topics/controller_test.go | 287 ++- network/topics/metrics.go | 26 +- network/topics/msg_validator.go | 67 - network/topics/msg_validator_test.go | 108 +- network/topics/params/gossipsub.go | 2 +- network/topics/params/topic_score.go | 5 +- network/topics/pubsub.go | 51 +- network/topics/scoring.go | 2 +- networkconfig/config.go | 6 + networkconfig/holesky-stage.go | 22 + operator/duties/attester.go | 53 +- operator/duties/attester_test.go | 192 +- operator/duties/base_handler.go | 32 +- operator/duties/base_handler_mock.go | 4 +- operator/duties/dutystore/duties.go | 97 + operator/duties/dutystore/store.go | 19 + operator/duties/dutystore/sync_committee.go | 76 + operator/duties/mocks/scheduler.go | 56 +- operator/duties/proposer.go | 56 +- operator/duties/proposer_test.go | 110 +- operator/duties/scheduler.go | 59 +- operator/duties/scheduler_test.go | 87 +- .../{synccommittee.go => sync_committee.go} | 81 +- ...mmittee_test.go => sync_committee_test.go} | 44 +- operator/duties/validatorregistration.go | 27 +- operator/fee_recipient/controller.go | 59 +- operator/fee_recipient/controller_test.go | 66 +- operator/node.go | 34 +- operator/slot_ticker/mocks/ticker.go | 63 - operator/slot_ticker/slotticker.go | 88 - operator/slot_ticker/ticker.go | 84 - operator/slotticker/mocks/slotticker.go | 115 + operator/slotticker/slotticker.go | 96 + operator/slotticker/slotticker_test.go | 179 ++ operator/validator/controller.go | 217 +- operator/validator/controller_test.go | 67 +- operator/validator/metrics.go | 28 - operator/validator/mocks/controller.go | 34 +- 
operator/validator/router.go | 24 +- operator/validator/router_test.go | 20 +- operator/validator/task_executor.go | 45 +- operator/validator/validators_map.go | 126 - operator/validatorsmap/validators_map.go | 110 + protocol/v2/blockchain/beacon/mock_client.go | 14 + .../v2/blockchain/beacon/mocks/network.go | 14 + protocol/v2/blockchain/beacon/network.go | 6 + protocol/v2/blockchain/beacon/network_test.go | 19 + protocol/v2/p2p/network.go | 19 +- protocol/v2/qbft/config.go | 29 +- protocol/v2/qbft/controller/controller.go | 35 +- .../v2/qbft/controller/controller_test.go | 68 +- protocol/v2/qbft/controller/decided.go | 2 - protocol/v2/qbft/controller/future_msg.go | 76 - protocol/v2/qbft/controller/timer.go | 9 +- protocol/v2/qbft/instance/commit.go | 7 +- protocol/v2/qbft/instance/instance.go | 12 +- protocol/v2/qbft/instance/marshalutils.go | 47 + protocol/v2/qbft/instance/metrics.go | 4 +- protocol/v2/qbft/instance/prepare.go | 6 +- protocol/v2/qbft/instance/proposal.go | 36 +- protocol/v2/qbft/instance/round_change.go | 19 +- protocol/v2/qbft/instance/timeout.go | 5 +- protocol/v2/qbft/roundtimer/mocks/timer.go | 100 + protocol/v2/qbft/roundtimer/testing_timer.go | 23 + protocol/v2/qbft/roundtimer/timer.go | 136 +- protocol/v2/qbft/roundtimer/timer_test.go | 184 +- .../v2/qbft/spectest/controller_sync_type.go | 55 - protocol/v2/qbft/spectest/controller_type.go | 74 +- .../v2/qbft/spectest/msg_processing_type.go | 27 +- .../v2/qbft/spectest/qbft_mapping_test.go | 25 +- protocol/v2/qbft/spectest/timeout_type.go | 5 +- protocol/v2/qbft/testing/utils.go | 14 +- protocol/v2/queue/worker/message_worker.go | 19 +- .../v2/queue/worker/message_worker_test.go | 14 +- .../v2/ssv/queue/message_prioritizer_test.go | 3 +- protocol/v2/ssv/queue/messages.go | 14 +- protocol/v2/ssv/queue/metrics.go | 37 +- protocol/v2/ssv/queue/queue_test.go | 30 +- protocol/v2/ssv/runner/runner.go | 14 +- protocol/v2/ssv/runner/runner_signatures.go | 17 +- protocol/v2/ssv/runner/timer.go | 2 +- .../v2/ssv/runner/validator_registration.go | 13 +- protocol/v2/ssv/runner/voluntary_exit.go | 232 ++ .../v2/ssv/spectest/msg_processing_type.go | 58 +- .../ssv/spectest/multi_msg_processing_type.go | 29 +- .../multi_start_new_runner_duty_type.go | 70 +- protocol/v2/ssv/spectest/ssv_mapping_test.go | 193 +- .../sync_committee_aggregator_proof_type.go | 2 +- protocol/v2/ssv/testing/runner.go | 16 +- protocol/v2/ssv/testing/validator.go | 3 +- protocol/v2/ssv/validator/metrics.go | 45 + .../v2/ssv/validator/msgqueue_consumer.go | 16 +- .../ssv/validator/non_committee_validator.go | 10 +- protocol/v2/ssv/validator/opts.go | 7 +- protocol/v2/ssv/validator/startup.go | 27 - protocol/v2/ssv/validator/timer.go | 16 +- protocol/v2/ssv/validator/validator.go | 37 +- protocol/v2/sync/handlers/decided_history.go | 57 - protocol/v2/sync/handlers/last_decided.go | 53 - protocol/v2/testing/test_utils.go | 27 +- protocol/v2/types/bls.go | 2 +- protocol/v2/types/crypto.go | 5 - protocol/v2/types/messages.go | 9 +- registry/storage/shares.go | 7 + scripts/spec-alignment/differ.config.yaml | 6 +- utils/rsaencryption/testingspace/vars.go | 1 + utils/testutils.go | 55 + 279 files changed, 23587 insertions(+), 3657 deletions(-) create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh rename .k8/{stage => hetzner-stage}/scripts/deploy-cluster-13--16.sh (94%) create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh create mode 100755 
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh
 create mode 100755 .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh
 rename .k8/{stage => hetzner-stage}/scripts/deploy-cluster-9--12.sh (99%)
 create mode 100755 .k8/hetzner-stage/scripts/deploy-holesky-exporters.sh
 create mode 100644 .k8/hetzner-stage/ssv-exporter-holesky.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-1-deployment.yml
 rename .k8/{stage => hetzner-stage}/ssv-node-10-deployment.yml (70%)
 rename .k8/{stage => hetzner-stage}/ssv-node-11-deployment.yml (70%)
 rename .k8/{stage => hetzner-stage}/ssv-node-12-deployment.yml (70%)
 create mode 100644 .k8/hetzner-stage/ssv-node-13-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-14-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-15-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-16-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-17-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-18-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-19-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-2-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-20-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-21-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-22-deployment.yml
 rename .k8/{stage/ssv-node-v3-1-deployment.yml => hetzner-stage/ssv-node-23-deployment.yml} (52%)
 rename .k8/{stage/ssv-node-v3-2-deployment.yml => hetzner-stage/ssv-node-24-deployment.yml} (55%)
 rename .k8/{stage/ssv-node-v3-3-deployment.yml => hetzner-stage/ssv-node-25-deployment.yml} (52%)
 rename .k8/{stage/ssv-node-v3-4-deployment.yml => hetzner-stage/ssv-node-26-deployment.yml} (55%)
 create mode 100644 .k8/hetzner-stage/ssv-node-27-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-28-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-29-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-3-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-30-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-31-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-32-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-33-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-34-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-35-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-36-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-37-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-38-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-39-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-4-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-40-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-41-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-42-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-43-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-44-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-45-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-46-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-47-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-48-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-49-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-5-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-50-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-51-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-52-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-53-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-54-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-55-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-56-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-57-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-58-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-59-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-6-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-60-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-61-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-62-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-63-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-64-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-65-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-66-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-67-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-68-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-69-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-7-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-70-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-71-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-72-deployment.yml
 create mode 100644 .k8/hetzner-stage/ssv-node-8-deployment.yml
 rename .k8/{stage => hetzner-stage}/ssv-node-9-deployment.yml (70%)
 create mode 100755 .k8/stage/scripts/deploy-holesky-exporters.sh
 create mode 100644 beacon/goclient/voluntary_exit.go
 create mode 100644 eth/ethtest/cluster_liquidated_test.go
 create mode 100644 eth/ethtest/cluster_reactivated_test.go
 create mode 100644 eth/ethtest/common_test.go
 create mode 100644 eth/ethtest/eth_e2e_test.go
 create mode 100644 eth/ethtest/operator_added_test.go
 create mode 100644 eth/ethtest/operator_removed_test.go
 create mode 100644 eth/ethtest/set_fee_recipient_test.go
 create mode 100644 eth/ethtest/utils_test.go
 create mode 100644 eth/ethtest/validator_added_test.go
 create mode 100644 eth/ethtest/validator_removed_test.go
 create mode 100644 message/validation/consensus_validation.go
 create mode 100644 message/validation/consensus_validation_test.go
 create mode 100644 message/validation/errors.go
 create mode 100644 message/validation/message_counts.go
 create mode 100644 message/validation/metrics.go
 create mode 100644 message/validation/partial_validation.go
 create mode 100644 message/validation/qbft_config.go
 create mode 100644 message/validation/signer_state.go
 create mode 100644 message/validation/validation.go
 create mode 100644 message/validation/validation_test.go
 create mode 100644 monitoring/grafana/dashboard_msg_validation.json
 delete mode 100644 network/syncing/concurrent.go
 delete mode 100644 network/syncing/concurrent_test.go
 delete mode 100644 network/syncing/mocks/syncer.go
 delete mode 100644 network/syncing/syncer.go
 delete mode 100644 network/syncing/syncer_test.go
 delete mode 100644 network/topics/msg_validator.go
 create mode 100644 networkconfig/holesky-stage.go
 create mode 100644 operator/duties/dutystore/duties.go
 create mode 100644 operator/duties/dutystore/store.go
 create mode 100644 operator/duties/dutystore/sync_committee.go
 rename operator/duties/{synccommittee.go => sync_committee.go} (84%)
 rename operator/duties/{synccommittee_test.go => sync_committee_test.go} (94%)
 delete mode 100644 operator/slot_ticker/mocks/ticker.go
 delete mode 100644 operator/slot_ticker/slotticker.go
 delete mode 100644 operator/slot_ticker/ticker.go
 create mode 100644 operator/slotticker/mocks/slotticker.go
 create mode 100644 operator/slotticker/slotticker.go
 create mode 100644 operator/slotticker/slotticker_test.go
 delete mode 100644 operator/validator/validators_map.go
 create mode 100644 operator/validatorsmap/validators_map.go
 create mode 100644 protocol/v2/blockchain/beacon/network_test.go
 delete mode 100644 protocol/v2/qbft/controller/future_msg.go
 create mode 100644 protocol/v2/qbft/instance/marshalutils.go
 create mode 100644 protocol/v2/qbft/roundtimer/mocks/timer.go
 create mode 100644 protocol/v2/qbft/roundtimer/testing_timer.go
 delete mode 100644 protocol/v2/qbft/spectest/controller_sync_type.go
 create mode 100644 protocol/v2/ssv/runner/voluntary_exit.go
 create mode 100644 protocol/v2/ssv/validator/metrics.go
 delete mode 100644 protocol/v2/sync/handlers/decided_history.go
 delete mode 100644 protocol/v2/sync/handlers/last_decided.go
 create mode 100644 utils/testutils.go

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index afcc42e934..f296289e6e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -88,6 +88,66 @@ Deploy exporter to stage:
     - stage
 
+
+# +---------------------+
+# | STAGE HETZNER NODES |
+# +---------------------+
+
+
+Deploy nodes to hetzner stage:
+  stage: deploy
+  tags:
+    - hetzner-k8s-stage
+  image: bitnami/kubectl:1.27.5
+  script:
+    - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION
+    - export SSV_NODES_CPU_LIMIT=$HETZNER_STAGE_SSV_NODES_CPU_LIMIT
+    - export SSV_NODES_MEM_LIMIT=$HETZNER_STAGE_SSV_NODES_MEM_LIMIT
+    - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig
+    - mv kubeconfig ~/.kube/
+    - export KUBECONFIG=~/.kube/kubeconfig
+    - kubectl config get-contexts
+    #
+    # +--------------------+
+    # |  Deploy SSV nodes  |
+    # +--------------------+
+    - .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-25--28.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+  only:
+    - stage
+
+Deploy exporter to hetzner stage:
+  stage: deploy
+  tags:
+    - hetzner-k8s-stage
+  image: bitnami/kubectl:1.27.5
+  script:
+    - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION
+    - export SSV_EXPORTER_CPU_LIMIT=$STAGE_SSV_EXPORTER_CPU_LIMIT
+    - export SSV_EXPORTER_MEM_LIMIT=$STAGE_SSV_EXPORTER_MEM_LIMIT
+    - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig
+    - mv kubeconfig ~/.kube/
+    - export KUBECONFIG=~/.kube/kubeconfig
+    - kubectl config get-contexts
+    - .k8/hetzner-stage/scripts/deploy-holesky-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT
+  only:
+    - stage
+
 # +---------------+
 # |     Prod      |
 # +---------------+
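Note: the eighteen deploy-cluster-*.sh invocations in the job above are identical except for the cluster range in the script path. For illustration only (this loop is a sketch, not part of the patch; it assumes the same argument order and a bash-capable runner), the script list could be driven from one line:

    for range in 1--4 5--8 9--12 13--16 17--20 21--24 25--28 29--32 33--36 37--40 41--44 45--48 49--52 53--56 57--60 61--64 65--68 69--72; do
      # each script takes the same 11 positional arguments, so pass them through unchanged
      .k8/hetzner-stage/scripts/deploy-cluster-$range.sh "$DOCKER_REPO_INFRA_STAGE" "$CI_COMMIT_SHA" ssv "$APP_REPLICAS_INFRA_STAGE" hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network "$K8S_API_VERSION" "$STAGE_HEALTH_CHECK_IMAGE" "$SSV_NODES_CPU_LIMIT" "$SSV_NODES_MEM_LIMIT" || exit 1
    done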
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh
new file mode 100755
index 0000000000..f2a8669b7d
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-1-deployment.yml"
+  "ssv-node-2-deployment.yml"
+  "ssv-node-3-deployment.yml"
+  "ssv-node-4-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/stage/scripts/deploy-cluster-13--16.sh b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh
similarity index 94%
rename from .k8/stage/scripts/deploy-cluster-13--16.sh
rename to .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh
index 9b3772bdfe..1de999f0e8 100755
--- a/.k8/stage/scripts/deploy-cluster-13--16.sh
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh
@@ -103,12 +103,12 @@ fi
 #done
 #fi
 
-DIR=".k8/stage"
+DIR=".k8/hetzner-stage"
 DEPLOY_FILES=(
-  "ssv-node-v3-1-deployment.yml"
-  "ssv-node-v3-2-deployment.yml"
-  "ssv-node-v3-3-deployment.yml"
-  "ssv-node-v3-4-deployment.yml"
+  "ssv-node-13-deployment.yml"
+  "ssv-node-14-deployment.yml"
+  "ssv-node-15-deployment.yml"
+  "ssv-node-16-deployment.yml"
 )
 
 if [[ -d $DIR ]]; then
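Note: the remaining per-cluster scripts below repeat the same 131-line body and differ only in the four DEPLOY_FILES entries. As a hedged sketch of how the family could collapse into one script (the consolidated name deploy-cluster.sh and its FIRST_NODE parameter are hypothetical, not part of this patch; argument checks are trimmed for brevity):

    #!/bin/bash
    # deploy-cluster.sh (hypothetical): deploy nodes N..N+3 given the first node number,
    # reusing the sed-substitution and kubectl-apply pattern of the generated scripts.
    set -x
    FIRST_NODE=$1    # e.g. 17 covers what deploy-cluster-17--20.sh does
    NAMESPACE=$2
    K8S_CONTEXT=$3
    DIR=".k8/hetzner-stage"
    for i in 0 1 2 3; do
      file="ssv-node-$((FIRST_NODE + i))-deployment.yml"
      sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" "${DIR}/${file}" || exit 1
      kubectl --context="$K8S_CONTEXT" apply -f "${DIR}/${file}" || exit 1
    done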
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh
new file mode 100755
index 0000000000..812a48e3f6
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-17-deployment.yml"
+  "ssv-node-18-deployment.yml"
+  "ssv-node-19-deployment.yml"
+  "ssv-node-20-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh
new file mode 100755
index 0000000000..57c89f2fdd
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-21-deployment.yml"
+  "ssv-node-22-deployment.yml"
+  "ssv-node-23-deployment.yml"
+  "ssv-node-24-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh
new file mode 100755
index 0000000000..134e83dad8
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-25-deployment.yml"
+  "ssv-node-26-deployment.yml"
+  "ssv-node-27-deployment.yml"
+  "ssv-node-28-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh
new file mode 100755
index 0000000000..6e721e8342
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-29-deployment.yml"
+  "ssv-node-30-deployment.yml"
+  "ssv-node-31-deployment.yml"
+  "ssv-node-32-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh
new file mode 100755
index 0000000000..deb2d911e5
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-33-deployment.yml"
+  "ssv-node-34-deployment.yml"
+  "ssv-node-35-deployment.yml"
+  "ssv-node-36-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh
new file mode 100755
index 0000000000..c82c77ce42
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-37-deployment.yml"
+  "ssv-node-38-deployment.yml"
+  "ssv-node-39-deployment.yml"
+  "ssv-node-40-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh
new file mode 100755
index 0000000000..c4684e685e
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-41-deployment.yml"
+  "ssv-node-42-deployment.yml"
+  "ssv-node-43-deployment.yml"
+  "ssv-node-44-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh
new file mode 100755
index 0000000000..11a54c9722
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-45-deployment.yml"
+  "ssv-node-46-deployment.yml"
+  "ssv-node-47-deployment.yml"
+  "ssv-node-48-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh
new file mode 100755
index 0000000000..dcc90d2742
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-49-deployment.yml"
+  "ssv-node-50-deployment.yml"
+  "ssv-node-51-deployment.yml"
+  "ssv-node-52-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh
new file mode 100755
index 0000000000..e3bb9e94a2
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-5-deployment.yml"
+  "ssv-node-6-deployment.yml"
+  "ssv-node-7-deployment.yml"
+  "ssv-node-8-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh
new file mode 100755
index 0000000000..9efd728b17
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-53-deployment.yml"
+  "ssv-node-54-deployment.yml"
+  "ssv-node-55-deployment.yml"
+  "ssv-node-56-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh
new file mode 100755
index 0000000000..1be68e57f5
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-57-deployment.yml"
+  "ssv-node-58-deployment.yml"
+  "ssv-node-59-deployment.yml"
+  "ssv-node-60-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh
new file mode 100755
index 0000000000..2fc32263a0
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z $9 ]]; then
+  echo "Please provide health check image"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide nodes cpu limit"
+  exit 1
+fi
+
+if [[ -z ${11} ]]; then
+  echo "Please provide nodes mem limit"
+  exit 1
+fi
+
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+HEALTH_CHECK_IMAGE=$9
+NODES_CPU_LIMIT=${10}
+NODES_MEM_LIMIT=${11}
+
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $HEALTH_CHECK_IMAGE
+echo $NODES_CPU_LIMIT
+echo $NODES_MEM_LIMIT
+
+# create namespace if not exists
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+    #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-61-deployment.yml"
+  "ssv-node-62-deployment.yml"
+  "ssv-node-63-deployment.yml"
+  "ssv-node-64-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "creating namespace $NAMESPACE" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-65-deployment.yml" + "ssv-node-66-deployment.yml" + "ssv-node-67-deployment.yml" + "ssv-node-68-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh new file mode 100755 index 0000000000..229536c0d4 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if it does not exist
if !
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "creating namespace $NAMESPACE" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-69-deployment.yml" + "ssv-node-70-deployment.yml" + "ssv-node-71-deployment.yml" + "ssv-node-72-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/stage/scripts/deploy-cluster-9--12.sh b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh similarity index 99% rename from .k8/stage/scripts/deploy-cluster-9--12.sh rename to .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh index 057b7205af..81fe2de698 100755 --- a/.k8/stage/scripts/deploy-cluster-9--12.sh +++ b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh @@ -103,7 +103,7 @@ fi #done #fi -DIR=".k8/stage" +DIR=".k8/hetzner-stage" DEPLOY_FILES=( "ssv-node-9-deployment.yml" "ssv-node-10-deployment.yml" diff --git a/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh b/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh new file mode 100755 index 0000000000..9a899ef3d3 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z ${9} ]]; then + echo "Please provide exporter cpu limit" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide exporter mem limit" + exit 1 +fi + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +EXPORTER_CPU_LIMIT=$9 +EXPORTER_MEM_LIMIT=${10} + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $EXPORTER_CPU_LIMIT +echo $EXPORTER_MEM_LIMIT + +# create namespace if it does not exist +if !
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "creating namespace $NAMESPACE" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-exporter-holesky.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/ssv-exporter-holesky.yml b/.k8/hetzner-stage/ssv-exporter-holesky.yml new file mode 100644 index 0000000000..10fb398390 --- /dev/null +++ b/.k8/hetzner-stage/ssv-exporter-holesky.yml @@ -0,0 +1,152 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-exporter-holesky + namespace: REPLACE_NAMESPACE + labels: + app: ssv-exporter-holesky +spec: + type: ClusterIP + ports: + - port: 12013 + protocol: UDP + targetPort: 12013 + name: port-12013 + - port: 13013 + protocol: TCP + targetPort: 13013 + name: port-13013 + - port: 14013 + protocol: TCP + targetPort: 14013 + name: port-14013 + - port: 15013 + protocol: TCP + targetPort: 15013 + name: port-15013 + - port: 16013 + protocol: TCP + targetPort: 16013 + name: port-16013 + selector: + app: ssv-exporter-holesky +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-exporter-holesky + name: ssv-exporter-holesky + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-exporter-holesky + template: + metadata: + labels: + app: ssv-exporter-holesky + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-exporter + containers: + - name: ssv-exporter-holesky + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_EXPORTER_CPU_LIMIT + memory: REPLACE_EXPORTER_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12013 + name: port-12013 + hostPort: 12013 + protocol: UDP + - containerPort: 13013 + name: port-13013 + hostPort: 13013 + - containerPort: 14013 + name: port-14013 + hostPort: 14013 + - containerPort: 15013 + name: port-15013 + hostPort: 15013 + - containerPort: 16013 + name: port-16013 + hostPort: 16013 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv.*" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15013" + - name: SSV_API_PORT + value: "16013" + - name: ENABLE_PROFILE + value: "true" + - name: UDP_PORT + value: "12013" + - name: TCP_PORT + value: "13013" + - name: WS_API_PORT + value:
"14013" + - name: FULLNODE + value: "true" + - name: EXPORTER + value: "true" + - name: DISCOVERY_TRACE + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: SUBNETS + value: "0xffffffffffffffffffffffffffffffff" + volumeMounts: + - mountPath: /data + name: ssv-exporter-holesky + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-exporter-holesky-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-exporter-holesky + persistentVolumeClaim: + claimName: ssv-exporter-holesky + - name: ssv-exporter-holesky-cm + configMap: + name: ssv-exporter-holesky-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-1-deployment.yml b/.k8/hetzner-stage/ssv-node-1-deployment.yml new file mode 100644 index 0000000000..9b11ffbce6 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-1-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-1-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-1 +spec: + type: ClusterIP + ports: + - port: 12001 + protocol: UDP + targetPort: 12001 + name: port-12001 + - port: 13001 + protocol: TCP + targetPort: 13001 + name: port-13001 + - port: 15001 + protocol: TCP + targetPort: 15001 + name: port-15001 + - port: 16001 + protocol: TCP + targetPort: 16001 + name: port-16001 + selector: + app: ssv-node-1 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-1 + name: ssv-node-1 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-1 + template: + metadata: + labels: + app: ssv-node-1 + spec: + containers: + - name: ssv-node-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12001 + name: port-12001 + hostPort: 12001 + protocol: UDP + - containerPort: 13001 + name: port-13001 + hostPort: 13001 + - containerPort: 15001 + name: port-15001 + hostPort: 15001 + - containerPort: 16001 + name: port-16001 + hostPort: 16001 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15001" + - name: SSV_API_PORT + value: "16001" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-1 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-1-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-1 + persistentVolumeClaim: + claimName: ssv-node-1 + - name: ssv-node-1-cm + configMap: + name: ssv-node-1-cm + hostNetwork: true diff --git a/.k8/stage/ssv-node-10-deployment.yml b/.k8/hetzner-stage/ssv-node-10-deployment.yml similarity index 70% rename from .k8/stage/ssv-node-10-deployment.yml rename to .k8/hetzner-stage/ssv-node-10-deployment.yml index ce73488cf3..051cf589d4 100644 --- a/.k8/stage/ssv-node-10-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-10-deployment.yml @@ -36,7 +36,7 @@ metadata: name: ssv-node-10 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: @@ -47,18 +47,10 @@ spec: labels: app: ssv-node-10 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - name: ssv-node-10 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -106,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -128,34 +120,14 @@ spec: name: ssv-node-10 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-10 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 + name: ssv-node-10-cm + imagePullSecrets: + - name: ecr-repo volumes: - name: ssv-node-10 persistentVolumeClaim: claimName: ssv-node-10 - - name: ssv-cm-validator-options-10 + - name: ssv-node-10-cm configMap: - name: ssv-cm-validator-options-10 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-10-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-11-deployment.yml b/.k8/hetzner-stage/ssv-node-11-deployment.yml similarity index 70% rename from .k8/stage/ssv-node-11-deployment.yml rename to .k8/hetzner-stage/ssv-node-11-deployment.yml index 2bddd3cdeb..e15bdb7b49 100644 --- a/.k8/stage/ssv-node-11-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-11-deployment.yml @@ -36,7 +36,7 @@ metadata: name: ssv-node-11 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: @@ -47,18 +47,10 @@ spec: labels: app: 
ssv-node-11 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - name: ssv-node-11 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -106,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -128,34 +120,14 @@ spec: name: ssv-node-11 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-11 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 + name: ssv-node-11-cm + imagePullSecrets: + - name: ecr-repo volumes: - name: ssv-node-11 persistentVolumeClaim: claimName: ssv-node-11 - - name: ssv-cm-validator-options-11 + - name: ssv-node-11-cm configMap: - name: ssv-cm-validator-options-11 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-11-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-12-deployment.yml b/.k8/hetzner-stage/ssv-node-12-deployment.yml similarity index 70% rename from .k8/stage/ssv-node-12-deployment.yml rename to .k8/hetzner-stage/ssv-node-12-deployment.yml index f06afa878f..ebcc12a1ac 100644 --- a/.k8/stage/ssv-node-12-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-12-deployment.yml @@ -36,7 +36,7 @@ metadata: name: ssv-node-12 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: @@ -47,18 +47,10 @@ spec: labels: app: ssv-node-12 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - name: ssv-node-12 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -106,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -128,34 +120,14 @@ spec: name: ssv-node-12 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-12 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 + name: ssv-node-12-cm + imagePullSecrets: + - name: ecr-repo volumes: - name: ssv-node-12 persistentVolumeClaim: claimName: ssv-node-12 - - name: ssv-cm-validator-options-12 + - name: ssv-node-12-cm configMap: - name: ssv-cm-validator-options-12 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: 
ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-12-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-13-deployment.yml b/.k8/hetzner-stage/ssv-node-13-deployment.yml new file mode 100644 index 0000000000..53f1bae513 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-13-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-13-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-13 +spec: + type: ClusterIP + ports: + - port: 12013 + protocol: UDP + targetPort: 12013 + name: port-12013 + - port: 13013 + protocol: TCP + targetPort: 13013 + name: port-13013 + - port: 15013 + protocol: TCP + targetPort: 15013 + name: port-15013 + - port: 16013 + protocol: TCP + targetPort: 16013 + name: port-16013 + selector: + app: ssv-node-13 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-13 + name: ssv-node-13 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-13 + template: + metadata: + labels: + app: ssv-node-13 + spec: + containers: + - name: ssv-node-13 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12013 + name: port-12013 + protocol: UDP + hostPort: 12013 + - containerPort: 13013 + name: port-13013 + hostPort: 13013 + - containerPort: 15013 + name: port-15013 + hostPort: 15013 + - containerPort: 16013 + name: port-16013 + hostPort: 16013 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15013" + - name: SSV_API_PORT + value: "16013" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-13 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-13-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-13 + persistentVolumeClaim: + claimName: ssv-node-13 + - name: ssv-node-13-cm + configMap: + name: ssv-node-13-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-14-deployment.yml b/.k8/hetzner-stage/ssv-node-14-deployment.yml new file mode 100644 index 0000000000..65f47bc363 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-14-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-14-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-14 +spec: + type: ClusterIP + ports: + - port: 12014 + protocol: UDP + targetPort: 12014 + name: port-12014 + - port: 13014 + protocol: TCP + targetPort: 13014 + name: port-13014 + - port: 15014 + protocol: TCP + targetPort: 15014 + name: port-15014 + - port: 16014 + protocol: TCP + targetPort: 16014 + name: port-16014 + selector: + app: ssv-node-14 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-14 + name: ssv-node-14 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-14 + template: + metadata: + labels: + app: ssv-node-14 + spec: + containers: + - name: ssv-node-14 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12014 + name: port-12014 + protocol: UDP + hostPort: 12014 + - containerPort: 13014 + name: port-13014 + hostPort: 13014 + - containerPort: 15014 + name: port-15014 + hostPort: 15014 + - containerPort: 16014 + name: port-16014 + hostPort: 16014 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15014" + - name: SSV_API_PORT + value: "16014" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-14 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-14-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-14 + persistentVolumeClaim: + claimName: ssv-node-14 + - name: ssv-node-14-cm + configMap: + name: ssv-node-14-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-15-deployment.yml b/.k8/hetzner-stage/ssv-node-15-deployment.yml new file mode 100644 index 0000000000..ec59df9720 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-15-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-15-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-15 +spec: + type: ClusterIP + ports: + - port: 12015 + protocol: UDP + targetPort: 12015 + name: port-12015 + - port: 13015 + protocol: TCP + targetPort: 13015 + name: port-13015 + - port: 15015 + protocol: TCP + targetPort: 15015 + name: port-15015 + - port: 16015 + protocol: TCP + targetPort: 16015 + name: port-16015 + selector: + app: ssv-node-15 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-15 + name: ssv-node-15 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-15 + template: + metadata: + labels: + app: ssv-node-15 + spec: + containers: + - name: ssv-node-15 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12015 + name: port-12015 + protocol: UDP + hostPort: 12015 + - containerPort: 13015 + name: port-13015 + hostPort: 13015 + - containerPort: 15015 + name: port-15015 + hostPort: 15015 + - containerPort: 16015 + name: port-16015 + hostPort: 16015 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15015" + - name: SSV_API_PORT + value: "16015" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-15 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-15-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-15 + persistentVolumeClaim: + claimName: ssv-node-15 + - name: ssv-node-15-cm + configMap: + name: ssv-node-15-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-16-deployment.yml b/.k8/hetzner-stage/ssv-node-16-deployment.yml new file mode 100644 index 0000000000..f25f60b70c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-16-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-16-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-16 +spec: + type: ClusterIP + ports: + - port: 12016 + protocol: UDP + targetPort: 12016 + name: port-12016 + - port: 13016 + protocol: TCP + targetPort: 13016 + name: port-13016 + - port: 15016 + protocol: TCP + targetPort: 15016 + name: port-15016 + - port: 16016 + protocol: TCP + targetPort: 16016 + name: port-16016 + selector: + app: ssv-node-16 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-16 + name: ssv-node-16 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-16 + template: + metadata: + labels: + app: ssv-node-16 + spec: + containers: + - name: ssv-node-16 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12016 + name: port-12016 + protocol: UDP + hostPort: 12016 + - containerPort: 13016 + name: port-13016 + hostPort: 13016 + - containerPort: 15016 + name: port-15016 + hostPort: 15016 + - containerPort: 16016 + name: port-16016 + hostPort: 16016 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15016" + - name: SSV_API_PORT + value: "16016" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-16 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-16-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-16 + persistentVolumeClaim: + claimName: ssv-node-16 + - name: ssv-node-16-cm + configMap: + name: ssv-node-16-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-17-deployment.yml b/.k8/hetzner-stage/ssv-node-17-deployment.yml new file mode 100644 index 0000000000..14561ef74c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-17-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-17-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-17 +spec: + type: ClusterIP + ports: + - port: 12017 + protocol: UDP + targetPort: 12017 + name: port-12017 + - port: 13017 + protocol: TCP + targetPort: 13017 + name: port-13017 + - port: 15017 + protocol: TCP + targetPort: 15017 + name: port-15017 + - port: 16017 + protocol: TCP + targetPort: 16017 + name: port-16017 + selector: + app: ssv-node-17 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-17 + name: ssv-node-17 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-17 + template: + metadata: + labels: + app: ssv-node-17 + spec: + containers: + - name: ssv-node-17 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12017 + name: port-12017 + protocol: UDP + hostPort: 12017 + - containerPort: 13017 + name: port-13017 + hostPort: 13017 + - containerPort: 15017 + name: port-15017 + hostPort: 15017 + - containerPort: 16017 + name: port-16017 + hostPort: 16017 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15017" + - name: SSV_API_PORT + value: "16017" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-17 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-17-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-17 + persistentVolumeClaim: + claimName: ssv-node-17 + - name: ssv-node-17-cm + configMap: + name: ssv-node-17-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-18-deployment.yml b/.k8/hetzner-stage/ssv-node-18-deployment.yml new file mode 100644 index 0000000000..40ac470dd3 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-18-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-18-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-18 +spec: + type: ClusterIP + ports: + - port: 12018 + protocol: UDP + targetPort: 12018 + name: port-12018 + - port: 13018 + protocol: TCP + targetPort: 13018 + name: port-13018 + - port: 15018 + protocol: TCP + targetPort: 15018 + name: port-15018 + - port: 16018 + protocol: TCP + targetPort: 16018 + name: port-16018 + selector: + app: ssv-node-18 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-18 + name: ssv-node-18 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-18 + template: + metadata: + labels: + app: ssv-node-18 + spec: + containers: + - name: ssv-node-18 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12018 + name: port-12018 + protocol: UDP + hostPort: 12018 + - containerPort: 13018 + name: port-13018 + hostPort: 13018 + - containerPort: 15018 + name: port-15018 + hostPort: 15018 + - containerPort: 16018 + name: port-16018 + hostPort: 16018 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15018" + - name: SSV_API_PORT + value: "16018" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-18 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-18-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-18 + persistentVolumeClaim: + claimName: ssv-node-18 + - name: ssv-node-18-cm + configMap: + name: ssv-node-18-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-19-deployment.yml b/.k8/hetzner-stage/ssv-node-19-deployment.yml new file mode 100644 index 0000000000..a266c88e48 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-19-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-19-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-19 +spec: + type: ClusterIP + ports: + - port: 12019 + protocol: UDP + targetPort: 12019 + name: port-12019 + - port: 13019 + protocol: TCP + targetPort: 13019 + name: port-13019 + - port: 15019 + protocol: TCP + targetPort: 15019 + name: port-15019 + - port: 16019 + protocol: TCP + targetPort: 16019 + name: port-16019 + selector: + app: ssv-node-19 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-19 + name: ssv-node-19 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-19 + template: + metadata: + labels: + app: ssv-node-19 + spec: + containers: + - name: ssv-node-19 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12019 + name: port-12019 + protocol: UDP + hostPort: 12019 + - containerPort: 13019 + name: port-13019 + hostPort: 13019 + - containerPort: 15019 + name: port-15019 + hostPort: 15019 + - containerPort: 16019 + name: port-16019 + hostPort: 16019 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15019" + - name: SSV_API_PORT + value: "16019" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-19 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-19-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-19 + persistentVolumeClaim: + claimName: ssv-node-19 + - name: ssv-node-19-cm + configMap: + name: ssv-node-19-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-2-deployment.yml b/.k8/hetzner-stage/ssv-node-2-deployment.yml new file mode 100644 index 0000000000..f98472bdf2 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-2-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-2-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-2 +spec: + type: ClusterIP + ports: + - port: 12002 + protocol: UDP + targetPort: 12002 + name: port-12002 + - port: 13002 + protocol: TCP + targetPort: 13002 + name: port-13002 + - port: 15002 + protocol: TCP + targetPort: 15002 + name: port-15002 + - port: 16002 + protocol: TCP + targetPort: 16002 + name: port-16002 + selector: + app: ssv-node-2 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-2 + name: ssv-node-2 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-2 + template: + metadata: + labels: + app: ssv-node-2 + spec: + containers: + - name: ssv-node-2 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12002 + name: port-12002 + protocol: UDP + hostPort: 12002 + - containerPort: 13002 + name: port-13002 + hostPort: 13002 + - containerPort: 15002 + name: port-15002 + hostPort: 15002 + - containerPort: 16002 + name: port-16002 + hostPort: 16002 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15002" + - name: SSV_API_PORT + value: "16002" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-2 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-2-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-2 + persistentVolumeClaim: + claimName: ssv-node-2 + - name: ssv-node-2-cm + configMap: + name: ssv-node-2-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-20-deployment.yml b/.k8/hetzner-stage/ssv-node-20-deployment.yml new file mode 100644 index 0000000000..2e4cc9792d --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-20-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-20-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-20 +spec: + type: ClusterIP + ports: + - port: 12020 + protocol: UDP + targetPort: 12020 + name: port-12020 + - port: 13020 + protocol: TCP + targetPort: 13020 + name: port-13020 + - port: 15020 + protocol: TCP + targetPort: 15020 + name: port-15020 + - port: 16020 + protocol: TCP + targetPort: 16020 + name: port-16020 + selector: + app: ssv-node-20 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-20 + name: ssv-node-20 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-20 + template: + metadata: + labels: + app: ssv-node-20 + spec: + containers: + - name: ssv-node-20 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12020 + name: port-12020 + protocol: UDP + hostPort: 12020 + - containerPort: 13020 + name: port-13020 + hostPort: 13020 + - containerPort: 15020 + name: port-15020 + hostPort: 15020 + - containerPort: 16020 + name: port-16020 + hostPort: 16020 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15020" + - name: SSV_API_PORT + value: "16020" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-20 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-20-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-20 + persistentVolumeClaim: + claimName: ssv-node-20 + - name: ssv-node-20-cm + configMap: + name: ssv-node-20-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-21-deployment.yml b/.k8/hetzner-stage/ssv-node-21-deployment.yml new file mode 100644 index 0000000000..7e7a28c0fa --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-21-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-21-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-21 +spec: + type: ClusterIP + ports: + - port: 12021 + protocol: UDP + targetPort: 12021 + name: port-12021 + - port: 13021 + protocol: TCP + targetPort: 13021 + name: port-13021 + - port: 15021 + protocol: TCP + targetPort: 15021 + name: port-15021 + - port: 16021 + protocol: TCP + targetPort: 16021 + name: port-16021 + selector: + app: ssv-node-21 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-21 + name: ssv-node-21 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-21 + template: + metadata: + labels: + app: ssv-node-21 + spec: + containers: + - name: ssv-node-21 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12021 + name: port-12021 + protocol: UDP + hostPort: 12021 + - containerPort: 13021 + name: port-13021 + hostPort: 13021 + - containerPort: 15021 + name: port-15021 + hostPort: 15021 + - containerPort: 16021 + name: port-16021 + hostPort: 16021 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15021" + - name: SSV_API_PORT + value: "16021" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-21 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-21-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-21 + persistentVolumeClaim: + claimName: ssv-node-21 + - name: ssv-node-21-cm + configMap: + name: ssv-node-21-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-22-deployment.yml b/.k8/hetzner-stage/ssv-node-22-deployment.yml new file mode 100644 index 0000000000..1459d26dc6 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-22-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-22-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-22 +spec: + type: ClusterIP + ports: + - port: 12022 + protocol: UDP + targetPort: 12022 + name: port-12022 + - port: 13022 + protocol: TCP + targetPort: 13022 + name: port-13022 + - port: 15022 + protocol: TCP + targetPort: 15022 + name: port-15022 + - port: 16022 + protocol: TCP + targetPort: 16022 + name: port-16022 + selector: + app: ssv-node-22 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-22 + name: ssv-node-22 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-22 + template: + metadata: + labels: + app: ssv-node-22 + spec: + containers: + - name: ssv-node-22 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12022 + name: port-12022 + protocol: UDP + hostPort: 12022 + - containerPort: 13022 + name: port-13022 + hostPort: 13022 + - containerPort: 15022 + name: port-15022 + hostPort: 15022 + - containerPort: 16022 + name: port-16022 + hostPort: 16022 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15022" + - name: SSV_API_PORT + value: "16022" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-22 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-22-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-22 + persistentVolumeClaim: + claimName: ssv-node-22 + - name: ssv-node-22-cm + configMap: + name: ssv-node-22-cm + hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-1-deployment.yml b/.k8/hetzner-stage/ssv-node-23-deployment.yml similarity index 52% rename from .k8/stage/ssv-node-v3-1-deployment.yml rename to .k8/hetzner-stage/ssv-node-23-deployment.yml index 59eeab296a..a5eeac635c 100644 --- a/.k8/stage/ssv-node-v3-1-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-23-deployment.yml @@ -2,67 +2,55 @@ apiVersion: v1 kind: Service metadata: - name: ssv-node-v3-1-svc + name: ssv-node-23-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-node-v3-1 + app: ssv-node-23 spec: type: ClusterIP ports: - - port: 12301 + - port: 12023 protocol: UDP - targetPort: 12301 - name: port-12301 - - port: 13301 + targetPort: 12023 + name: port-12023 + - port: 13023 protocol: TCP - targetPort: 13301 - name: port-13301 - - port: 15301 + targetPort: 13023 + name: port-13023 + - port: 15023 protocol: TCP - targetPort: 15301 - name: port-15301 - - port: 16301 + targetPort: 15023 + name: port-15023 + - port: 16023 protocol: TCP - targetPort: 16301 - name: port-16301 - - port: 16301 - protocol: TCP - targetPort: 16301 - name: port-16301 + targetPort: 16023 + name: port-16023 selector: - app: ssv-node-v3-1 + app: ssv-node-23 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-node-v3-1 - name: ssv-node-v3-1 + app: ssv-node-23 + name: ssv-node-23 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: matchLabels: - app: ssv-node-v3-1 + app: ssv-node-23 template: metadata: labels: - app: ssv-node-v3-1 + app: ssv-node-23 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-node-v3-1 + - name: ssv-node-23 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -70,19 +58,19 @@ spec: memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12301 - name: port-12301 + - containerPort: 12023 + name: port-12023 protocol: UDP - hostPort: 12301 - - containerPort: 13301 - name: port-13301 - hostPort: 13301 - - containerPort: 15301 - name: port-15301 - hostPort: 15301 - - containerPort: 16301 - name: port-16301 - hostPort: 16301 + hostPort: 12023 + - containerPort: 13023 + name: port-13023 + hostPort: 13023 + - containerPort: 15023 + name: port-15023 + hostPort: 15023 + - containerPort: 16023 + name: port-16023 + hostPort: 16023 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -97,14 +85,10 @@ spec: name: config-secrets key: 
abi_version optional: true - - name: DEBUG_SERVICES - value: "ssv/*." - name: LOG_LEVEL value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" + - name: DEBUG_SERVICES + value: "ssv/*." - name: DISCOVERY_TYPE_KEY value: "discv5" - name: CONSENSUS_TYPE @@ -114,36 +98,36 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" + - name: DB_REPORTING + value: "false" - name: METRICS_API_PORT - value: "15301" + value: "15023" - name: SSV_API_PORT - value: "16301" + value: "16023" - name: ENABLE_PROFILE value: "true" - - name: WS_API_PORT - value: "16301" - - name: FULLNODE - value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' - name: BUILDER_PROPOSALS value: "true" volumeMounts: - mountPath: /data - name: ssv-node-v3-1 + name: ssv-node-23 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-v3-1 + name: ssv-node-23-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-node-v3-1 + - name: ssv-node-23 persistentVolumeClaim: - claimName: ssv-node-v3-1 - - name: ssv-cm-validator-options-v3-1 + claimName: ssv-node-23 + - name: ssv-node-23-cm configMap: - name: ssv-cm-validator-options-v3-1 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-23-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-2-deployment.yml b/.k8/hetzner-stage/ssv-node-24-deployment.yml similarity index 55% rename from .k8/stage/ssv-node-v3-2-deployment.yml rename to .k8/hetzner-stage/ssv-node-24-deployment.yml index 2daed3c6a8..5cb1e41b5f 100644 --- a/.k8/stage/ssv-node-v3-2-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-24-deployment.yml @@ -2,63 +2,55 @@ apiVersion: v1 kind: Service metadata: - name: ssv-node-v3-2-svc + name: ssv-node-24-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-node-v3-2 + app: ssv-node-24 spec: type: ClusterIP ports: - - port: 12302 + - port: 12424 protocol: UDP - targetPort: 12302 - name: port-12302 - - port: 13302 + targetPort: 12424 + name: port-12424 + - port: 13024 protocol: TCP - targetPort: 13302 - name: port-13302 - - port: 15302 + targetPort: 13024 + name: port-13024 + - port: 15024 protocol: TCP - targetPort: 15302 - name: port-15302 - - port: 16302 + targetPort: 15024 + name: port-15024 + - port: 16024 protocol: TCP - targetPort: 16302 - name: port-16302 + targetPort: 16024 + name: port-16024 selector: - app: ssv-node-v3-2 + app: ssv-node-24 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-node-v3-2 - name: ssv-node-v3-2 + app: ssv-node-24 + name: ssv-node-24 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: matchLabels: - app: ssv-node-v3-2 + app: ssv-node-24 template: metadata: labels: - app: ssv-node-v3-2 + app: ssv-node-24 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-node-v3-2 + - name: ssv-node-24 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -66,19 +58,19 @@ spec: memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12302 - name: port-12302 + - containerPort: 12424 + name: port-12424 protocol: 
UDP - hostPort: 12302 - - containerPort: 13302 - name: port-13302 - hostPort: 13302 - - containerPort: 15302 - name: port-15302 - hostPort: 15302 - - containerPort: 16302 - name: port-16302 - hostPort: 16302 + hostPort: 12424 + - containerPort: 13024 + name: port-13024 + hostPort: 13024 + - containerPort: 15024 + name: port-15024 + hostPort: 15024 + - containerPort: 16024 + name: port-16024 + hostPort: 16024 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -93,14 +85,10 @@ spec: name: config-secrets key: abi_version optional: true - - name: DEBUG_SERVICES - value: "ssv/*." - name: LOG_LEVEL value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" + - name: DEBUG_SERVICES + value: "ssv/*." - name: DISCOVERY_TYPE_KEY value: "discv5" - name: CONSENSUS_TYPE @@ -110,32 +98,36 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" + - name: DB_REPORTING + value: "false" - name: METRICS_API_PORT - value: "15302" + value: "15024" - name: SSV_API_PORT - value: "16302" + value: "16024" - name: ENABLE_PROFILE value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' - name: BUILDER_PROPOSALS value: "true" volumeMounts: - mountPath: /data - name: ssv-node-v3-2 + name: ssv-node-24 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-v3-2 + name: ssv-node-24-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-node-v3-2 + - name: ssv-node-24 persistentVolumeClaim: - claimName: ssv-node-v3-2 - - name: ssv-cm-validator-options-v3-2 + claimName: ssv-node-24 + - name: ssv-node-24-cm configMap: - name: ssv-cm-validator-options-v3-2 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-24-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-3-deployment.yml b/.k8/hetzner-stage/ssv-node-25-deployment.yml similarity index 52% rename from .k8/stage/ssv-node-v3-3-deployment.yml rename to .k8/hetzner-stage/ssv-node-25-deployment.yml index 64bfbbe759..ccd6e42cf2 100644 --- a/.k8/stage/ssv-node-v3-3-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-25-deployment.yml @@ -2,67 +2,55 @@ apiVersion: v1 kind: Service metadata: - name: ssv-node-v3-3-svc + name: ssv-node-25-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-node-v3-3 + app: ssv-node-25 spec: type: ClusterIP ports: - - port: 12303 + - port: 12025 protocol: UDP - targetPort: 12303 - name: port-12303 - - port: 13303 + targetPort: 12025 + name: port-12025 + - port: 13025 protocol: TCP - targetPort: 13303 - name: port-13303 - - port: 15303 + targetPort: 13025 + name: port-13025 + - port: 15025 protocol: TCP - targetPort: 15303 - name: port-15303 - - port: 16303 + targetPort: 15025 + name: port-15025 + - port: 16025 protocol: TCP - targetPort: 16303 - name: port-16303 - - port: 16303 - protocol: TCP - targetPort: 16303 - name: port-16303 + targetPort: 16025 + name: port-16025 selector: - app: ssv-node-v3-3 + app: ssv-node-25 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-node-v3-3 - name: ssv-node-v3-3 + app: ssv-node-25 + name: ssv-node-25 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: matchLabels: - app: ssv-node-v3-3 + app: ssv-node-25 template: metadata: labels: - app: ssv-node-v3-3 + app: ssv-node-25 spec: - affinity: - nodeAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-node-v3-3 + - name: ssv-node-25 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -70,19 +58,19 @@ spec: memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12303 - name: port-12303 + - containerPort: 12025 + name: port-12025 protocol: UDP - hostPort: 12303 - - containerPort: 13303 - name: port-13303 - hostPort: 13303 - - containerPort: 15303 - name: port-15303 - hostPort: 15303 - - containerPort: 16303 - name: port-16303 - hostPort: 16303 + hostPort: 12025 + - containerPort: 13025 + name: port-13025 + hostPort: 13025 + - containerPort: 15025 + name: port-15025 + hostPort: 15025 + - containerPort: 16025 + name: port-16025 + hostPort: 16025 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -97,14 +85,10 @@ spec: name: config-secrets key: abi_version optional: true - - name: DEBUG_SERVICES - value: "ssv/*." - name: LOG_LEVEL value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" + - name: DEBUG_SERVICES + value: "ssv/*." - name: DISCOVERY_TYPE_KEY value: "discv5" - name: CONSENSUS_TYPE @@ -114,36 +98,36 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" + - name: DB_REPORTING + value: "false" - name: METRICS_API_PORT - value: "15303" + value: "15025" - name: SSV_API_PORT - value: "16303" + value: "16025" - name: ENABLE_PROFILE value: "true" - - name: WS_API_PORT - value: "16303" - - name: FULLNODE - value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' - name: BUILDER_PROPOSALS value: "true" volumeMounts: - mountPath: /data - name: ssv-node-v3-3 + name: ssv-node-25 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-v3-3 + name: ssv-node-25-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-node-v3-3 + - name: ssv-node-25 persistentVolumeClaim: - claimName: ssv-node-v3-3 - - name: ssv-cm-validator-options-v3-3 + claimName: ssv-node-25 + - name: ssv-node-25-cm configMap: - name: ssv-cm-validator-options-v3-3 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-25-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-4-deployment.yml b/.k8/hetzner-stage/ssv-node-26-deployment.yml similarity index 55% rename from .k8/stage/ssv-node-v3-4-deployment.yml rename to .k8/hetzner-stage/ssv-node-26-deployment.yml index b13efb4f5c..396e7360f1 100644 --- a/.k8/stage/ssv-node-v3-4-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-26-deployment.yml @@ -2,63 +2,55 @@ apiVersion: v1 kind: Service metadata: - name: ssv-node-v3-4-svc + name: ssv-node-26-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-node-v3-4 + app: ssv-node-26 spec: type: ClusterIP ports: - - port: 12304 + - port: 12026 protocol: UDP - targetPort: 12304 - name: port-12304 - - port: 13304 + targetPort: 12026 + name: port-12026 + - port: 13026 protocol: TCP - targetPort: 13304 - name: port-13304 - - port: 15304 + targetPort: 13026 + name: port-13026 + - port: 15026 protocol: TCP - targetPort: 15304 - name: port-15304 - - port: 16304 + targetPort: 15026 + name: port-15026 + - port: 16026 protocol: TCP - targetPort: 16304 - name: 
port-16304 + targetPort: 16026 + name: port-16026 selector: - app: ssv-node-v3-4 + app: ssv-node-26 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-node-v3-4 - name: ssv-node-v3-4 + app: ssv-node-26 + name: ssv-node-26 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: matchLabels: - app: ssv-node-v3-4 + app: ssv-node-26 template: metadata: labels: - app: ssv-node-v3-4 + app: ssv-node-26 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-node-v3-4 + - name: ssv-node-26 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -66,19 +58,19 @@ spec: memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12304 - name: port-12304 + - containerPort: 12026 + name: port-12026 protocol: UDP - hostPort: 12304 - - containerPort: 13304 - name: port-13304 - hostPort: 13304 - - containerPort: 15304 - name: port-15304 - hostPort: 15304 - - containerPort: 16304 - name: port-16304 - hostPort: 16304 + hostPort: 12026 + - containerPort: 13026 + name: port-13026 + hostPort: 13026 + - containerPort: 15026 + name: port-15026 + hostPort: 15026 + - containerPort: 16026 + name: port-16026 + hostPort: 16026 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -93,14 +85,10 @@ spec: name: config-secrets key: abi_version optional: true - - name: DEBUG_SERVICES - value: "ssv/*." - name: LOG_LEVEL value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" + - name: DEBUG_SERVICES + value: "ssv/*." 
- name: DISCOVERY_TYPE_KEY value: "discv5" - name: CONSENSUS_TYPE @@ -110,32 +98,36 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" + - name: DB_REPORTING + value: "false" - name: METRICS_API_PORT - value: "15304" + value: "15026" - name: SSV_API_PORT - value: "16304" + value: "16026" - name: ENABLE_PROFILE value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' - name: BUILDER_PROPOSALS value: "true" volumeMounts: - mountPath: /data - name: ssv-node-v3-4 + name: ssv-node-26 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-v3-4 + name: ssv-node-26-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-node-v3-4 + - name: ssv-node-26 persistentVolumeClaim: - claimName: ssv-node-v3-4 - - name: ssv-cm-validator-options-v3-4 + claimName: ssv-node-26 + - name: ssv-node-26-cm configMap: - name: ssv-cm-validator-options-v3-4 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-26-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-27-deployment.yml b/.k8/hetzner-stage/ssv-node-27-deployment.yml new file mode 100644 index 0000000000..8674533272 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-27-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-27-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-27 +spec: + type: ClusterIP + ports: + - port: 12027 + protocol: UDP + targetPort: 12027 + name: port-12027 + - port: 13027 + protocol: TCP + targetPort: 13027 + name: port-13027 + - port: 15027 + protocol: TCP + targetPort: 15027 + name: port-15027 + - port: 16027 + protocol: TCP + targetPort: 16027 + name: port-16027 + selector: + app: ssv-node-27 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-27 + name: ssv-node-27 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-27 + template: + metadata: + labels: + app: ssv-node-27 + spec: + containers: + - name: ssv-node-27 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12027 + name: port-12027 + protocol: UDP + hostPort: 12027 + - containerPort: 13027 + name: port-13027 + hostPort: 13027 + - containerPort: 15027 + name: port-15027 + hostPort: 15027 + - containerPort: 16027 + name: port-16027 + hostPort: 16027 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15027" + - name: SSV_API_PORT + value: "16027" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-27 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-27-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-27 + persistentVolumeClaim: + claimName: ssv-node-27 + - name: ssv-node-27-cm + configMap: + name: ssv-node-27-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-28-deployment.yml b/.k8/hetzner-stage/ssv-node-28-deployment.yml new file mode 100644 index 0000000000..08712b773b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-28-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-28-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-28 +spec: + type: ClusterIP + ports: + - port: 12028 + protocol: UDP + targetPort: 12028 + name: port-12028 + - port: 13028 + protocol: TCP + targetPort: 13028 + name: port-13028 + - port: 15028 + protocol: TCP + targetPort: 15028 + name: port-15028 + - port: 16028 + protocol: TCP + targetPort: 16028 + name: port-16028 + selector: + app: ssv-node-28 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-28 + name: ssv-node-28 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-28 + template: + metadata: + labels: + app: ssv-node-28 + spec: + containers: + - name: ssv-node-28 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12028 + name: port-12028 + protocol: UDP + hostPort: 12028 + - containerPort: 13028 + name: port-13028 + hostPort: 13028 + - containerPort: 15028 + name: port-15028 + hostPort: 15028 + - containerPort: 16028 + name: port-16028 + hostPort: 16028 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15028" + - name: SSV_API_PORT + value: "16028" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-28 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-28-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-28 + persistentVolumeClaim: + claimName: ssv-node-28 + - name: ssv-node-28-cm + configMap: + name: ssv-node-28-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-29-deployment.yml b/.k8/hetzner-stage/ssv-node-29-deployment.yml new file mode 100644 index 0000000000..acb427576c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-29-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-29-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-29 +spec: + type: ClusterIP + ports: + - port: 12029 + protocol: UDP + targetPort: 12029 + name: port-12029 + - port: 13029 + protocol: TCP + targetPort: 13029 + name: port-13029 + - port: 15029 + protocol: TCP + targetPort: 15029 + name: port-15029 + - port: 16029 + protocol: TCP + targetPort: 16029 + name: port-16029 + selector: + app: ssv-node-29 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-29 + name: ssv-node-29 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-29 + template: + metadata: + labels: + app: ssv-node-29 + spec: + containers: + - name: ssv-node-29 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12029 + name: port-12029 + protocol: UDP + hostPort: 12029 + - containerPort: 13029 + name: port-13029 + hostPort: 13029 + - containerPort: 15029 + name: port-15029 + hostPort: 15029 + - containerPort: 16029 + name: port-16029 + hostPort: 16029 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15029" + - name: SSV_API_PORT + value: "16029" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-29 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-29-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-29 + persistentVolumeClaim: + claimName: ssv-node-29 + - name: ssv-node-29-cm + configMap: + name: ssv-node-29-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-3-deployment.yml b/.k8/hetzner-stage/ssv-node-3-deployment.yml new file mode 100644 index 0000000000..8486b720d0 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-3-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-3-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-3 +spec: + type: ClusterIP + ports: + - port: 12003 + protocol: UDP + targetPort: 12003 + name: port-12003 + - port: 13003 + protocol: TCP + targetPort: 13003 + name: port-13003 + - port: 15003 + protocol: TCP + targetPort: 15003 + name: port-15003 + - port: 16003 + protocol: TCP + targetPort: 16003 + name: port-16003 + selector: + app: ssv-node-3 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-3 + name: ssv-node-3 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-3 + template: + metadata: + labels: + app: ssv-node-3 + spec: + containers: + - name: ssv-node-3 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12003 + name: port-12003 + protocol: UDP + hostPort: 12003 + - containerPort: 13003 + name: port-13003 + hostPort: 13003 + - containerPort: 15003 + name: port-15003 + hostPort: 15003 + - containerPort: 16003 + name: port-16003 + hostPort: 16003 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15003" + - name: SSV_API_PORT + value: "16003" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-3 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-3-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-3 + persistentVolumeClaim: + claimName: ssv-node-3 + - name: ssv-node-3-cm + configMap: + name: ssv-node-3-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-30-deployment.yml b/.k8/hetzner-stage/ssv-node-30-deployment.yml new file mode 100644 index 0000000000..239bbc7302 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-30-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-30-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-30 +spec: + type: ClusterIP + ports: + - port: 12030 + protocol: UDP + targetPort: 12030 + name: port-12030 + - port: 13030 + protocol: TCP + targetPort: 13030 + name: port-13030 + - port: 15030 + protocol: TCP + targetPort: 15030 + name: port-15030 + - port: 16030 + protocol: TCP + targetPort: 16030 + name: port-16030 + selector: + app: ssv-node-30 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-30 + name: ssv-node-30 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-30 + template: + metadata: + labels: + app: ssv-node-30 + spec: + containers: + - name: ssv-node-30 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12030 + name: port-12030 + protocol: UDP + hostPort: 12030 + - containerPort: 13030 + name: port-13030 + hostPort: 13030 + - containerPort: 15030 + name: port-15030 + hostPort: 15030 + - containerPort: 16030 + name: port-16030 + hostPort: 16030 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15030" + - name: SSV_API_PORT + value: "16030" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-30 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-30-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-30 + persistentVolumeClaim: + claimName: ssv-node-30 + - name: ssv-node-30-cm + configMap: + name: ssv-node-30-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-31-deployment.yml b/.k8/hetzner-stage/ssv-node-31-deployment.yml new file mode 100644 index 0000000000..af78e460ce --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-31-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-31-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-31 +spec: + type: ClusterIP + ports: + - port: 12031 + protocol: UDP + targetPort: 12031 + name: port-12031 + - port: 13031 + protocol: TCP + targetPort: 13031 + name: port-13031 + - port: 15031 + protocol: TCP + targetPort: 15031 + name: port-15031 + - port: 16031 + protocol: TCP + targetPort: 16031 + name: port-16031 + selector: + app: ssv-node-31 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-31 + name: ssv-node-31 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-31 + template: + metadata: + labels: + app: ssv-node-31 + spec: + containers: + - name: ssv-node-31 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12031 + name: port-12031 + protocol: UDP + hostPort: 12031 + - containerPort: 13031 + name: port-13031 + hostPort: 13031 + - containerPort: 15031 + name: port-15031 + hostPort: 15031 + - containerPort: 16031 + name: port-16031 + hostPort: 16031 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15031" + - name: SSV_API_PORT + value: "16031" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-31 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-31-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-31 + persistentVolumeClaim: + claimName: ssv-node-31 + - name: ssv-node-31-cm + configMap: + name: ssv-node-31-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-32-deployment.yml b/.k8/hetzner-stage/ssv-node-32-deployment.yml new file mode 100644 index 0000000000..d6567ac81e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-32-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-32-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-32 +spec: + type: ClusterIP + ports: + - port: 12032 + protocol: UDP + targetPort: 12032 + name: port-12032 + - port: 13032 + protocol: TCP + targetPort: 13032 + name: port-13032 + - port: 15032 + protocol: TCP + targetPort: 15032 + name: port-15032 + - port: 16032 + protocol: TCP + targetPort: 16032 + name: port-16032 + selector: + app: ssv-node-32 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-32 + name: ssv-node-32 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-32 + template: + metadata: + labels: + app: ssv-node-32 + spec: + containers: + - name: ssv-node-32 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12032 + name: port-12032 + protocol: UDP + hostPort: 12032 + - containerPort: 13032 + name: port-13032 + hostPort: 13032 + - containerPort: 15032 + name: port-15032 + hostPort: 15032 + - containerPort: 16032 + name: port-16032 + hostPort: 16032 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15032" + - name: SSV_API_PORT + value: "16032" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-32 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-32-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-32 + persistentVolumeClaim: + claimName: ssv-node-32 + - name: ssv-node-32-cm + configMap: + name: ssv-node-32-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-33-deployment.yml b/.k8/hetzner-stage/ssv-node-33-deployment.yml new file mode 100644 index 0000000000..6b72d090df --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-33-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-33-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-33 +spec: + type: ClusterIP + ports: + - port: 12033 + protocol: UDP + targetPort: 12033 + name: port-12033 + - port: 13033 + protocol: TCP + targetPort: 13033 + name: port-13033 + - port: 15033 + protocol: TCP + targetPort: 15033 + name: port-15033 + - port: 16033 + protocol: TCP + targetPort: 16033 + name: port-16033 + selector: + app: ssv-node-33 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-33 + name: ssv-node-33 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-33 + template: + metadata: + labels: + app: ssv-node-33 + spec: + containers: + - name: ssv-node-33 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12033 + name: port-12033 + protocol: UDP + hostPort: 12033 + - containerPort: 13033 + name: port-13033 + hostPort: 13033 + - containerPort: 15033 + name: port-15033 + hostPort: 15033 + - containerPort: 16033 + name: port-16033 + hostPort: 16033 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15033" + - name: SSV_API_PORT + value: "16033" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-33 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-33-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-33 + persistentVolumeClaim: + claimName: ssv-node-33 + - name: ssv-node-33-cm + configMap: + name: ssv-node-33-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-34-deployment.yml b/.k8/hetzner-stage/ssv-node-34-deployment.yml new file mode 100644 index 0000000000..363b7b16d3 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-34-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-34-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-34 +spec: + type: ClusterIP + ports: + - port: 12034 + protocol: UDP + targetPort: 12034 + name: port-12034 + - port: 13034 + protocol: TCP + targetPort: 13034 + name: port-13034 + - port: 15034 + protocol: TCP + targetPort: 15034 + name: port-15034 + - port: 16034 + protocol: TCP + targetPort: 16034 + name: port-16034 + selector: + app: ssv-node-34 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-34 + name: ssv-node-34 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-34 + template: + metadata: + labels: + app: ssv-node-34 + spec: + containers: + - name: ssv-node-34 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12034 + name: port-12034 + protocol: UDP + hostPort: 12034 + - containerPort: 13034 + name: port-13034 + hostPort: 13034 + - containerPort: 15034 + name: port-15034 + hostPort: 15034 + - containerPort: 16034 + name: port-16034 + hostPort: 16034 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15034" + - name: SSV_API_PORT + value: "16034" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-34 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-34-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-34 + persistentVolumeClaim: + claimName: ssv-node-34 + - name: ssv-node-34-cm + configMap: + name: ssv-node-34-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-35-deployment.yml b/.k8/hetzner-stage/ssv-node-35-deployment.yml new file mode 100644 index 0000000000..0693b7da9d --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-35-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-35-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-35 +spec: + type: ClusterIP + ports: + - port: 12035 + protocol: UDP + targetPort: 12035 + name: port-12035 + - port: 13035 + protocol: TCP + targetPort: 13035 + name: port-13035 + - port: 15035 + protocol: TCP + targetPort: 15035 + name: port-15035 + - port: 16035 + protocol: TCP + targetPort: 16035 + name: port-16035 + selector: + app: ssv-node-35 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-35 + name: ssv-node-35 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-35 + template: + metadata: + labels: + app: ssv-node-35 + spec: + containers: + - name: ssv-node-35 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12035 + name: port-12035 + protocol: UDP + hostPort: 12035 + - containerPort: 13035 + name: port-13035 + hostPort: 13035 + - containerPort: 15035 + name: port-15035 + hostPort: 15035 + - containerPort: 16035 + name: port-16035 + hostPort: 16035 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15035" + - name: SSV_API_PORT + value: "16035" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-35 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-35-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-35 + persistentVolumeClaim: + claimName: ssv-node-35 + - name: ssv-node-35-cm + configMap: + name: ssv-node-35-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-36-deployment.yml b/.k8/hetzner-stage/ssv-node-36-deployment.yml new file mode 100644 index 0000000000..65a1566a23 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-36-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-36-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-36 +spec: + type: ClusterIP + ports: + - port: 12036 + protocol: UDP + targetPort: 12036 + name: port-12036 + - port: 13036 + protocol: TCP + targetPort: 13036 + name: port-13036 + - port: 15036 + protocol: TCP + targetPort: 15036 + name: port-15036 + - port: 16036 + protocol: TCP + targetPort: 16036 + name: port-16036 + selector: + app: ssv-node-36 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-36 + name: ssv-node-36 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-36 + template: + metadata: + labels: + app: ssv-node-36 + spec: + containers: + - name: ssv-node-36 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12036 + name: port-12036 + protocol: UDP + hostPort: 12036 + - containerPort: 13036 + name: port-13036 + hostPort: 13036 + - containerPort: 15036 + name: port-15036 + hostPort: 15036 + - containerPort: 16036 + name: port-16036 + hostPort: 16036 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15036" + - name: SSV_API_PORT + value: "16036" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-36 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-36-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-36 + persistentVolumeClaim: + claimName: ssv-node-36 + - name: ssv-node-36-cm + configMap: + name: ssv-node-36-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-37-deployment.yml b/.k8/hetzner-stage/ssv-node-37-deployment.yml new file mode 100644 index 0000000000..3c312c1560 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-37-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-37-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-37 +spec: + type: ClusterIP + ports: + - port: 12037 + protocol: UDP + targetPort: 12037 + name: port-12037 + - port: 13037 + protocol: TCP + targetPort: 13037 + name: port-13037 + - port: 15037 + protocol: TCP + targetPort: 15037 + name: port-15037 + - port: 16037 + protocol: TCP + targetPort: 16037 + name: port-16037 + selector: + app: ssv-node-37 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-37 + name: ssv-node-37 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-37 + template: + metadata: + labels: + app: ssv-node-37 + spec: + containers: + - name: ssv-node-37 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12037 + name: port-12037 + protocol: UDP + hostPort: 12037 + - containerPort: 13037 + name: port-13037 + hostPort: 13037 + - containerPort: 15037 + name: port-15037 + hostPort: 15037 + - containerPort: 16037 + name: port-16037 + hostPort: 16037 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15037" + - name: SSV_API_PORT + value: "16037" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-37 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-37-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-37 + persistentVolumeClaim: + claimName: ssv-node-37 + - name: ssv-node-37-cm + configMap: + name: ssv-node-37-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-38-deployment.yml b/.k8/hetzner-stage/ssv-node-38-deployment.yml new file mode 100644 index 0000000000..ba3e0dacb2 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-38-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-38-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-38 +spec: + type: ClusterIP + ports: + - port: 12038 + protocol: UDP + targetPort: 12038 + name: port-12038 + - port: 13038 + protocol: TCP + targetPort: 13038 + name: port-13038 + - port: 15038 + protocol: TCP + targetPort: 15038 + name: port-15038 + - port: 16038 + protocol: TCP + targetPort: 16038 + name: port-16038 + selector: + app: ssv-node-38 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-38 + name: ssv-node-38 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-38 + template: + metadata: + labels: + app: ssv-node-38 + spec: + containers: + - name: ssv-node-38 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12038 + name: port-12038 + protocol: UDP + hostPort: 12038 + - containerPort: 13038 + name: port-13038 + hostPort: 13038 + - containerPort: 15038 + name: port-15038 + hostPort: 15038 + - containerPort: 16038 + name: port-16038 + hostPort: 16038 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15038" + - name: SSV_API_PORT + value: "16038" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-38 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-38-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-38 + persistentVolumeClaim: + claimName: ssv-node-38 + - name: ssv-node-38-cm + configMap: + name: ssv-node-38-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-39-deployment.yml b/.k8/hetzner-stage/ssv-node-39-deployment.yml new file mode 100644 index 0000000000..cef15eed57 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-39-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-39-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-39 +spec: + type: ClusterIP + ports: + - port: 12039 + protocol: UDP + targetPort: 12039 + name: port-12039 + - port: 13039 + protocol: TCP + targetPort: 13039 + name: port-13039 + - port: 15039 + protocol: TCP + targetPort: 15039 + name: port-15039 + - port: 16039 + protocol: TCP + targetPort: 16039 + name: port-16039 + selector: + app: ssv-node-39 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-39 + name: ssv-node-39 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-39 + template: + metadata: + labels: + app: ssv-node-39 + spec: + containers: + - name: ssv-node-39 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12039 + name: port-12039 + protocol: UDP + hostPort: 12039 + - containerPort: 13039 + name: port-13039 + hostPort: 13039 + - containerPort: 15039 + name: port-15039 + hostPort: 15039 + - containerPort: 16039 + name: port-16039 + hostPort: 16039 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15039" + - name: SSV_API_PORT + value: "16039" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-39 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-39-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-39 + persistentVolumeClaim: + claimName: ssv-node-39 + - name: ssv-node-39-cm + configMap: + name: ssv-node-39-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-4-deployment.yml b/.k8/hetzner-stage/ssv-node-4-deployment.yml new file mode 100644 index 0000000000..758473cb70 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-4-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-4-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-4 +spec: + type: ClusterIP + ports: + - port: 12004 + protocol: UDP + targetPort: 12004 + name: port-12004 + - port: 13004 + protocol: TCP + targetPort: 13004 + name: port-13004 + - port: 15004 + protocol: TCP + targetPort: 15004 + name: port-15004 + - port: 16004 + protocol: TCP + targetPort: 16004 + name: port-16004 + selector: + app: ssv-node-4 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-4 + name: ssv-node-4 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-4 + template: + metadata: + labels: + app: ssv-node-4 + spec: + containers: + - name: ssv-node-4 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12004 + name: port-12004 + protocol: UDP + hostPort: 12004 + - containerPort: 13004 + name: port-13004 + hostPort: 13004 + - containerPort: 15004 + name: port-15004 + hostPort: 15004 + - containerPort: 16004 + name: port-16004 + hostPort: 16004 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15004" + - name: SSV_API_PORT + value: "16004" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-4 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-4-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-4 + persistentVolumeClaim: + claimName: ssv-node-4 + - name: ssv-node-4-cm + configMap: + name: ssv-node-4-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-40-deployment.yml b/.k8/hetzner-stage/ssv-node-40-deployment.yml new file mode 100644 index 0000000000..022eded9fd --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-40-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-40-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-40 +spec: + type: ClusterIP + ports: + - port: 12040 + protocol: UDP + targetPort: 12040 + name: port-12040 + - port: 13040 + protocol: TCP + targetPort: 13040 + name: port-13040 + - port: 15040 + protocol: TCP + targetPort: 15040 + name: port-15040 + - port: 16040 + protocol: TCP + targetPort: 16040 + name: port-16040 + selector: + app: ssv-node-40 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-40 + name: ssv-node-40 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-40 + template: + metadata: + labels: + app: ssv-node-40 + spec: + containers: + - name: ssv-node-40 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12040 + name: port-12040 + protocol: UDP + hostPort: 12040 + - containerPort: 13040 + name: port-13040 + hostPort: 13040 + - containerPort: 15040 + name: port-15040 + hostPort: 15040 + - containerPort: 16040 + name: port-16040 + hostPort: 16040 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15040" + - name: SSV_API_PORT + value: "16040" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-40 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-40-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-40 + persistentVolumeClaim: + claimName: ssv-node-40 + - name: ssv-node-40-cm + configMap: + name: ssv-node-40-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-41-deployment.yml b/.k8/hetzner-stage/ssv-node-41-deployment.yml new file mode 100644 index 0000000000..b2fc6fcad1 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-41-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-41-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-41 +spec: + type: ClusterIP + ports: + - port: 12041 + protocol: UDP + targetPort: 12041 + name: port-12041 + - port: 13041 + protocol: TCP + targetPort: 13041 + name: port-13041 + - port: 15041 + protocol: TCP + targetPort: 15041 + name: port-15041 + - port: 16041 + protocol: TCP + targetPort: 16041 + name: port-16041 + selector: + app: ssv-node-41 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-41 + name: ssv-node-41 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-41 + template: + metadata: + labels: + app: ssv-node-41 + spec: + containers: + - name: ssv-node-41 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12041 + name: port-12041 + protocol: UDP + hostPort: 12041 + - containerPort: 13041 + name: port-13041 + hostPort: 13041 + - containerPort: 15041 + name: port-15041 + hostPort: 15041 + - containerPort: 16041 + name: port-16041 + hostPort: 16041 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15041" + - name: SSV_API_PORT + value: "16041" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-41 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-41-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-41 + persistentVolumeClaim: + claimName: ssv-node-41 + - name: ssv-node-41-cm + configMap: + name: ssv-node-41-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-42-deployment.yml b/.k8/hetzner-stage/ssv-node-42-deployment.yml new file mode 100644 index 0000000000..3664aeca45 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-42-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-42-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-42 +spec: + type: ClusterIP + ports: + - port: 12042 + protocol: UDP + targetPort: 12042 + name: port-12042 + - port: 13042 + protocol: TCP + targetPort: 13042 + name: port-13042 + - port: 15042 + protocol: TCP + targetPort: 15042 + name: port-15042 + - port: 16042 + protocol: TCP + targetPort: 16042 + name: port-16042 + selector: + app: ssv-node-42 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-42 + name: ssv-node-42 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-42 + template: + metadata: + labels: + app: ssv-node-42 + spec: + containers: + - name: ssv-node-42 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12042 + name: port-12042 + protocol: UDP + hostPort: 12042 + - containerPort: 13042 + name: port-13042 + hostPort: 13042 + - containerPort: 15042 + name: port-15042 + hostPort: 15042 + - containerPort: 16042 + name: port-16042 + hostPort: 16042 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15042" + - name: SSV_API_PORT + value: "16042" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-42 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-42-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-42 + persistentVolumeClaim: + claimName: ssv-node-42 + - name: ssv-node-42-cm + configMap: + name: ssv-node-42-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-43-deployment.yml b/.k8/hetzner-stage/ssv-node-43-deployment.yml new file mode 100644 index 0000000000..a9cd4f9b95 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-43-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-43-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-43 +spec: + type: ClusterIP + ports: + - port: 12043 + protocol: UDP + targetPort: 12043 + name: port-12043 + - port: 13043 + protocol: TCP + targetPort: 13043 + name: port-13043 + - port: 15043 + protocol: TCP + targetPort: 15043 + name: port-15043 + - port: 16043 + protocol: TCP + targetPort: 16043 + name: port-16043 + selector: + app: ssv-node-43 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-43 + name: ssv-node-43 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-43 + template: + metadata: + labels: + app: ssv-node-43 + spec: + containers: + - name: ssv-node-43 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12043 + name: port-12043 + protocol: UDP + hostPort: 12043 + - containerPort: 13043 + name: port-13043 + hostPort: 13043 + - containerPort: 15043 + name: port-15043 + hostPort: 15043 + - containerPort: 16043 + name: port-16043 + hostPort: 16043 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15043" + - name: SSV_API_PORT + value: "16043" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-43 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-43-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-43 + persistentVolumeClaim: + claimName: ssv-node-43 + - name: ssv-node-43-cm + configMap: + name: ssv-node-43-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-44-deployment.yml b/.k8/hetzner-stage/ssv-node-44-deployment.yml new file mode 100644 index 0000000000..01d0e22a17 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-44-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-44-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-44 +spec: + type: ClusterIP + ports: + - port: 12044 + protocol: UDP + targetPort: 12044 + name: port-12044 + - port: 13044 + protocol: TCP + targetPort: 13044 + name: port-13044 + - port: 15044 + protocol: TCP + targetPort: 15044 + name: port-15044 + - port: 16044 + protocol: TCP + targetPort: 16044 + name: port-16044 + selector: + app: ssv-node-44 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-44 + name: ssv-node-44 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-44 + template: + metadata: + labels: + app: ssv-node-44 + spec: + containers: + - name: ssv-node-44 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12044 + name: port-12044 + protocol: UDP + hostPort: 12044 + - containerPort: 13044 + name: port-13044 + hostPort: 13044 + - containerPort: 15044 + name: port-15044 + hostPort: 15044 + - containerPort: 16044 + name: port-16044 + hostPort: 16044 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15044" + - name: SSV_API_PORT + value: "16044" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-44 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-44-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-44 + persistentVolumeClaim: + claimName: ssv-node-44 + - name: ssv-node-44-cm + configMap: + name: ssv-node-44-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-45-deployment.yml b/.k8/hetzner-stage/ssv-node-45-deployment.yml new file mode 100644 index 0000000000..81c4760282 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-45-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-45-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-45 +spec: + type: ClusterIP + ports: + - port: 12045 + protocol: UDP + targetPort: 12045 + name: port-12045 + - port: 13045 + protocol: TCP + targetPort: 13045 + name: port-13045 + - port: 15045 + protocol: TCP + targetPort: 15045 + name: port-15045 + - port: 16045 + protocol: TCP + targetPort: 16045 + name: port-16045 + selector: + app: ssv-node-45 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-45 + name: ssv-node-45 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-45 + template: + metadata: + labels: + app: ssv-node-45 + spec: + containers: + - name: ssv-node-45 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12045 + name: port-12045 + protocol: UDP + hostPort: 12045 + - containerPort: 13045 + name: port-13045 + hostPort: 13045 + - containerPort: 15045 + name: port-15045 + hostPort: 15045 + - containerPort: 16045 + name: port-16045 + hostPort: 16045 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15045" + - name: SSV_API_PORT + value: "16045" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-45 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-45-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-45 + persistentVolumeClaim: + claimName: ssv-node-45 + - name: ssv-node-45-cm + configMap: + name: ssv-node-45-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-46-deployment.yml b/.k8/hetzner-stage/ssv-node-46-deployment.yml new file mode 100644 index 0000000000..57526b672c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-46-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-46-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-46 +spec: + type: ClusterIP + ports: + - port: 12046 + protocol: UDP + targetPort: 12046 + name: port-12046 + - port: 13046 + protocol: TCP + targetPort: 13046 + name: port-13046 + - port: 15046 + protocol: TCP + targetPort: 15046 + name: port-15046 + - port: 16046 + protocol: TCP + targetPort: 16046 + name: port-16046 + selector: + app: ssv-node-46 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-46 + name: ssv-node-46 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-46 + template: + metadata: + labels: + app: ssv-node-46 + spec: + containers: + - name: ssv-node-46 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12046 + name: port-12046 + protocol: UDP + hostPort: 12046 + - containerPort: 13046 + name: port-13046 + hostPort: 13046 + - containerPort: 15046 + name: port-15046 + hostPort: 15046 + - containerPort: 16046 + name: port-16046 + hostPort: 16046 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15046" + - name: SSV_API_PORT + value: "16046" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-46 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-46-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-46 + persistentVolumeClaim: + claimName: ssv-node-46 + - name: ssv-node-46-cm + configMap: + name: ssv-node-46-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-47-deployment.yml b/.k8/hetzner-stage/ssv-node-47-deployment.yml new file mode 100644 index 0000000000..8d832b2158 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-47-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-47-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-47 +spec: + type: ClusterIP + ports: + - port: 12047 + protocol: UDP + targetPort: 12047 + name: port-12047 + - port: 13047 + protocol: TCP + targetPort: 13047 + name: port-13047 + - port: 15047 + protocol: TCP + targetPort: 15047 + name: port-15047 + - port: 16047 + protocol: TCP + targetPort: 16047 + name: port-16047 + selector: + app: ssv-node-47 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-47 + name: ssv-node-47 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-47 + template: + metadata: + labels: + app: ssv-node-47 + spec: + containers: + - name: ssv-node-47 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12047 + name: port-12047 + protocol: UDP + hostPort: 12047 + - containerPort: 13047 + name: port-13047 + hostPort: 13047 + - containerPort: 15047 + name: port-15047 + hostPort: 15047 + - containerPort: 16047 + name: port-16047 + hostPort: 16047 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15047" + - name: SSV_API_PORT + value: "16047" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-47 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-47-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-47 + persistentVolumeClaim: + claimName: ssv-node-47 + - name: ssv-node-47-cm + configMap: + name: ssv-node-47-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-48-deployment.yml b/.k8/hetzner-stage/ssv-node-48-deployment.yml new file mode 100644 index 0000000000..3c6fcbc533 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-48-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-48-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-48 +spec: + type: ClusterIP + ports: + - port: 12048 + protocol: UDP + targetPort: 12048 + name: port-12048 + - port: 13048 + protocol: TCP + targetPort: 13048 + name: port-13048 + - port: 15048 + protocol: TCP + targetPort: 15048 + name: port-15048 + - port: 16048 + protocol: TCP + targetPort: 16048 + name: port-16048 + selector: + app: ssv-node-48 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-48 + name: ssv-node-48 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-48 + template: + metadata: + labels: + app: ssv-node-48 + spec: + containers: + - name: ssv-node-48 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12048 + name: port-12048 + protocol: UDP + hostPort: 12048 + - containerPort: 13048 + name: port-13048 + hostPort: 13048 + - containerPort: 15048 + name: port-15048 + hostPort: 15048 + - containerPort: 16048 + name: port-16048 + hostPort: 16048 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15048" + - name: SSV_API_PORT + value: "16048" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-48 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-48-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-48 + persistentVolumeClaim: + claimName: ssv-node-48 + - name: ssv-node-48-cm + configMap: + name: ssv-node-48-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-49-deployment.yml b/.k8/hetzner-stage/ssv-node-49-deployment.yml new file mode 100644 index 0000000000..16c168c0c0 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-49-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-49-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-49 +spec: + type: ClusterIP + ports: + - port: 12049 + protocol: UDP + targetPort: 12049 + name: port-12049 + - port: 13049 + protocol: TCP + targetPort: 13049 + name: port-13049 + - port: 15049 + protocol: TCP + targetPort: 15049 + name: port-15049 + - port: 16049 + protocol: TCP + targetPort: 16049 + name: port-16049 + selector: + app: ssv-node-49 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-49 + name: ssv-node-49 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-49 + template: + metadata: + labels: + app: ssv-node-49 + spec: + containers: + - name: ssv-node-49 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12049 + name: port-12049 + protocol: UDP + hostPort: 12049 + - containerPort: 13049 + name: port-13049 + hostPort: 13049 + - containerPort: 15049 + name: port-15049 + hostPort: 15049 + - containerPort: 16049 + name: port-16049 + hostPort: 16049 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15049" + - name: SSV_API_PORT + value: "16049" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-49 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-49-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-49 + persistentVolumeClaim: + claimName: ssv-node-49 + - name: ssv-node-49-cm + configMap: + name: ssv-node-49-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-5-deployment.yml b/.k8/hetzner-stage/ssv-node-5-deployment.yml new file mode 100644 index 0000000000..0c4f294174 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-5-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-5-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-5 +spec: + type: ClusterIP + ports: + - port: 12005 + protocol: UDP + targetPort: 12005 + name: port-12005 + - port: 13005 + protocol: TCP + targetPort: 13005 + name: port-13005 + - port: 15005 + protocol: TCP + targetPort: 15005 + name: port-15005 + - port: 16005 + protocol: TCP + targetPort: 16005 + name: port-16005 + selector: + app: ssv-node-5 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-5 + name: ssv-node-5 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-5 + template: + metadata: + labels: + app: ssv-node-5 + spec: + containers: + - name: ssv-node-5 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12005 + name: port-12005 + protocol: UDP + hostPort: 12005 + - containerPort: 13005 + name: port-13005 + hostPort: 13005 + - containerPort: 15005 + name: port-15005 + hostPort: 15005 + - containerPort: 16005 + name: port-16005 + hostPort: 16005 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15005" + - name: SSV_API_PORT + value: "16005" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-5 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-5-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-5 + persistentVolumeClaim: + claimName: ssv-node-5 + - name: ssv-node-5-cm + configMap: + name: ssv-node-5-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-50-deployment.yml b/.k8/hetzner-stage/ssv-node-50-deployment.yml new file mode 100644 index 0000000000..237964637e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-50-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-50-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-50 +spec: + type: ClusterIP + ports: + - port: 12050 + protocol: UDP + targetPort: 12050 + name: port-12050 + - port: 13050 + protocol: TCP + targetPort: 13050 + name: port-13050 + - port: 15050 + protocol: TCP + targetPort: 15050 + name: port-15050 + - port: 16050 + protocol: TCP + targetPort: 16050 + name: port-16050 + selector: + app: ssv-node-50 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-50 + name: ssv-node-50 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-50 + template: + metadata: + labels: + app: ssv-node-50 + spec: + containers: + - name: ssv-node-50 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12050 + name: port-12050 + protocol: UDP + hostPort: 12050 + - containerPort: 13050 + name: port-13050 + hostPort: 13050 + - containerPort: 15050 + name: port-15050 + hostPort: 15050 + - containerPort: 16050 + name: port-16050 + hostPort: 16050 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15050" + - name: SSV_API_PORT + value: "16050" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-50 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-50-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-50 + persistentVolumeClaim: + claimName: ssv-node-50 + - name: ssv-node-50-cm + configMap: + name: ssv-node-50-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-51-deployment.yml b/.k8/hetzner-stage/ssv-node-51-deployment.yml new file mode 100644 index 0000000000..028ac33bde --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-51-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-51-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-51 +spec: + type: ClusterIP + ports: + - port: 12051 + protocol: UDP + targetPort: 12051 + name: port-12051 + - port: 13051 + protocol: TCP + targetPort: 13051 + name: port-13051 + - port: 15051 + protocol: TCP + targetPort: 15051 + name: port-15051 + - port: 16051 + protocol: TCP + targetPort: 16051 + name: port-16051 + selector: + app: ssv-node-51 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-51 + name: ssv-node-51 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-51 + template: + metadata: + labels: + app: ssv-node-51 + spec: + containers: + - name: ssv-node-51 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12051 + name: port-12051 + protocol: UDP + hostPort: 12051 + - containerPort: 13051 + name: port-13051 + hostPort: 13051 + - containerPort: 15051 + name: port-15051 + hostPort: 15051 + - containerPort: 16051 + name: port-16051 + hostPort: 16051 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15051" + - name: SSV_API_PORT + value: "16051" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-51 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-51-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-51 + persistentVolumeClaim: + claimName: ssv-node-51 + - name: ssv-node-51-cm + configMap: + name: ssv-node-51-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-52-deployment.yml b/.k8/hetzner-stage/ssv-node-52-deployment.yml new file mode 100644 index 0000000000..9f2eb3d888 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-52-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-52-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-52 +spec: + type: ClusterIP + ports: + - port: 12052 + protocol: UDP + targetPort: 12052 + name: port-12052 + - port: 13052 + protocol: TCP + targetPort: 13052 + name: port-13052 + - port: 15052 + protocol: TCP + targetPort: 15052 + name: port-15052 + - port: 16052 + protocol: TCP + targetPort: 16052 + name: port-16052 + selector: + app: ssv-node-52 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-52 + name: ssv-node-52 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-52 + template: + metadata: + labels: + app: ssv-node-52 + spec: + containers: + - name: ssv-node-52 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12052 + name: port-12052 + protocol: UDP + hostPort: 12052 + - containerPort: 13052 + name: port-13052 + hostPort: 13052 + - containerPort: 15052 + name: port-15052 + hostPort: 15052 + - containerPort: 16052 + name: port-16052 + hostPort: 16052 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15052" + - name: SSV_API_PORT + value: "16052" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-52 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-52-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-52 + persistentVolumeClaim: + claimName: ssv-node-52 + - name: ssv-node-52-cm + configMap: + name: ssv-node-52-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-53-deployment.yml b/.k8/hetzner-stage/ssv-node-53-deployment.yml new file mode 100644 index 0000000000..68515c515b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-53-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-53-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-53 +spec: + type: ClusterIP + ports: + - port: 12053 + protocol: UDP + targetPort: 12053 + name: port-12053 + - port: 13053 + protocol: TCP + targetPort: 13053 + name: port-13053 + - port: 15053 + protocol: TCP + targetPort: 15053 + name: port-15053 + - port: 16053 + protocol: TCP + targetPort: 16053 + name: port-16053 + selector: + app: ssv-node-53 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-53 + name: ssv-node-53 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-53 + template: + metadata: + labels: + app: ssv-node-53 + spec: + containers: + - name: ssv-node-53 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12053 + name: port-12053 + protocol: UDP + hostPort: 12053 + - containerPort: 13053 + name: port-13053 + hostPort: 13053 + - containerPort: 15053 + name: port-15053 + hostPort: 15053 + - containerPort: 16053 + name: port-16053 + hostPort: 16053 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15053" + - name: SSV_API_PORT + value: "16053" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-53 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-53-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-53 + persistentVolumeClaim: + claimName: ssv-node-53 + - name: ssv-node-53-cm + configMap: + name: ssv-node-53-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-54-deployment.yml b/.k8/hetzner-stage/ssv-node-54-deployment.yml new file mode 100644 index 0000000000..9eb12dd56b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-54-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-54-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-54 +spec: + type: ClusterIP + ports: + - port: 12054 + protocol: UDP + targetPort: 12054 + name: port-12054 + - port: 13054 + protocol: TCP + targetPort: 13054 + name: port-13054 + - port: 15054 + protocol: TCP + targetPort: 15054 + name: port-15054 + - port: 16054 + protocol: TCP + targetPort: 16054 + name: port-16054 + selector: + app: ssv-node-54 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-54 + name: ssv-node-54 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-54 + template: + metadata: + labels: + app: ssv-node-54 + spec: + containers: + - name: ssv-node-54 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12054 + name: port-12054 + protocol: UDP + hostPort: 12054 + - containerPort: 13054 + name: port-13054 + hostPort: 13054 + - containerPort: 15054 + name: port-15054 + hostPort: 15054 + - containerPort: 16054 + name: port-16054 + hostPort: 16054 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15054" + - name: SSV_API_PORT + value: "16054" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-54 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-54-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-54 + persistentVolumeClaim: + claimName: ssv-node-54 + - name: ssv-node-54-cm + configMap: + name: ssv-node-54-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-55-deployment.yml b/.k8/hetzner-stage/ssv-node-55-deployment.yml new file mode 100644 index 0000000000..05a109197b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-55-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-55-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-55 +spec: + type: ClusterIP + ports: + - port: 12055 + protocol: UDP + targetPort: 12055 + name: port-12055 + - port: 13055 + protocol: TCP + targetPort: 13055 + name: port-13055 + - port: 15055 + protocol: TCP + targetPort: 15055 + name: port-15055 + - port: 16055 + protocol: TCP + targetPort: 16055 + name: port-16055 + selector: + app: ssv-node-55 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-55 + name: ssv-node-55 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-55 + template: + metadata: + labels: + app: ssv-node-55 + spec: + containers: + - name: ssv-node-55 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12055 + name: port-12055 + protocol: UDP + hostPort: 12055 + - containerPort: 13055 + name: port-13055 + hostPort: 13055 + - containerPort: 15055 + name: port-15055 + hostPort: 15055 + - containerPort: 16055 + name: port-16055 + hostPort: 16055 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15055" + - name: SSV_API_PORT + value: "16055" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-55 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-55-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-55 + persistentVolumeClaim: + claimName: ssv-node-55 + - name: ssv-node-55-cm + configMap: + name: ssv-node-55-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-56-deployment.yml b/.k8/hetzner-stage/ssv-node-56-deployment.yml new file mode 100644 index 0000000000..42c0c59b42 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-56-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-56-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-56 +spec: + type: ClusterIP + ports: + - port: 12056 + protocol: UDP + targetPort: 12056 + name: port-12056 + - port: 13056 + protocol: TCP + targetPort: 13056 + name: port-13056 + - port: 15056 + protocol: TCP + targetPort: 15056 + name: port-15056 + - port: 16056 + protocol: TCP + targetPort: 16056 + name: port-16056 + selector: + app: ssv-node-56 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-56 + name: ssv-node-56 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-56 + template: + metadata: + labels: + app: ssv-node-56 + spec: + containers: + - name: ssv-node-56 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12056 + name: port-12056 + protocol: UDP + hostPort: 12056 + - containerPort: 13056 + name: port-13056 + hostPort: 13056 + - containerPort: 15056 + name: port-15056 + hostPort: 15056 + - containerPort: 16056 + name: port-16056 + hostPort: 16056 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15056" + - name: SSV_API_PORT + value: "16056" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-56 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-56-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-56 + persistentVolumeClaim: + claimName: ssv-node-56 + - name: ssv-node-56-cm + configMap: + name: ssv-node-56-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-57-deployment.yml b/.k8/hetzner-stage/ssv-node-57-deployment.yml new file mode 100644 index 0000000000..d2d8945516 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-57-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-57-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-57 +spec: + type: ClusterIP + ports: + - port: 12057 + protocol: UDP + targetPort: 12057 + name: port-12057 + - port: 13057 + protocol: TCP + targetPort: 13057 + name: port-13057 + - port: 15057 + protocol: TCP + targetPort: 15057 + name: port-15057 + - port: 16057 + protocol: TCP + targetPort: 16057 + name: port-16057 + selector: + app: ssv-node-57 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-57 + name: ssv-node-57 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-57 + template: + metadata: + labels: + app: ssv-node-57 + spec: + containers: + - name: ssv-node-57 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12057 + name: port-12057 + protocol: UDP + hostPort: 12057 + - containerPort: 13057 + name: port-13057 + hostPort: 13057 + - containerPort: 15057 + name: port-15057 + hostPort: 15057 + - containerPort: 16057 + name: port-16057 + hostPort: 16057 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15057"
+            - name: SSV_API_PORT
+              value: "16057"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-57
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-57-cm
+      imagePullSecrets:
+        - name: ecr-repo
+      volumes:
+        - name: ssv-node-57
+          persistentVolumeClaim:
+            claimName: ssv-node-57
+        - name: ssv-node-57-cm
+          configMap:
+            name: ssv-node-57-cm
+      hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml
new file mode 100644
index 0000000000..21401421dd
--- /dev/null
+++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml
@@ -0,0 +1,133 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ssv-node-58-svc
+  namespace: REPLACE_NAMESPACE
+  labels:
+    app: ssv-node-58
+spec:
+  type: ClusterIP
+  ports:
+    - port: 12058
+      protocol: UDP
+      targetPort: 12058
+      name: port-12058
+    - port: 13058
+      protocol: TCP
+      targetPort: 13058
+      name: port-13058
+    - port: 15058
+      protocol: TCP
+      targetPort: 15058
+      name: port-15058
+    - port: 16058
+      protocol: TCP
+      targetPort: 16058
+      name: port-16058
+  selector:
+    app: ssv-node-58
+---
+apiVersion: REPLACE_API_VERSION
+kind: Deployment
+metadata:
+  labels:
+    app: ssv-node-58
+  name: ssv-node-58
+  namespace: REPLACE_NAMESPACE
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app: ssv-node-58
+  template:
+    metadata:
+      labels:
+        app: ssv-node-58
+    spec:
+      containers:
+        - name: ssv-node-58
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12058
+              name: port-12058
+              protocol: UDP
+              hostPort: 12058
+            - containerPort: 13058
+              name: port-13058
+              hostPort: 13058
+            - containerPort: 15058
+              name: port-15058
+              hostPort: 15058
+            - containerPort: 16058
+              name: port-16058
+              hostPort: 16058
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15058"
+            - name: SSV_API_PORT
+              value: "16058"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-58
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-58-cm
+      imagePullSecrets:
+        - name: ecr-repo
+      volumes:
+        - name: ssv-node-58
+          persistentVolumeClaim:
+            claimName: ssv-node-58
+        - name: ssv-node-58-cm
+          configMap:
+            name: ssv-node-58-cm
+      hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-59-deployment.yml b/.k8/hetzner-stage/ssv-node-59-deployment.yml
new file mode 100644
index 0000000000..8cefa6ba7b
--- /dev/null
+++ b/.k8/hetzner-stage/ssv-node-59-deployment.yml
@@ -0,0 +1,133 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ssv-node-59-svc
+  namespace: REPLACE_NAMESPACE
+  labels:
+    app: ssv-node-59
+spec:
+  type: ClusterIP
+  ports:
+    - port: 12059
+      protocol: UDP
+      targetPort: 12059
+      name: port-12059
+    - port: 13059
+      protocol: TCP
+      targetPort: 13059
+      name: port-13059
+    - port: 15059
+      protocol: TCP
+      targetPort: 15059
+      name: port-15059
+    - port: 16059
+      protocol: TCP
+      targetPort: 16059
+      name: port-16059
+  selector:
+    app: ssv-node-59
+---
+apiVersion: REPLACE_API_VERSION
+kind: Deployment
+metadata:
+  labels:
+    app: ssv-node-59
+  name: ssv-node-59
+  namespace: REPLACE_NAMESPACE
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app: ssv-node-59
+  template:
+    metadata:
+      labels:
+        app: ssv-node-59
+    spec:
+      containers:
+        - name: ssv-node-59
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12059
+              name: port-12059
+              protocol: UDP
+              hostPort: 12059
+            - containerPort: 13059
+              name: port-13059
+              hostPort: 13059
+            - containerPort: 15059
+              name: port-15059
+              hostPort: 15059
+            - containerPort: 16059
+              name: port-16059
+              hostPort: 16059
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15059" + - name: SSV_API_PORT + value: "16059" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-59 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-59-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-59 + persistentVolumeClaim: + claimName: ssv-node-59 + - name: ssv-node-59-cm + configMap: + name: ssv-node-59-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-6-deployment.yml b/.k8/hetzner-stage/ssv-node-6-deployment.yml new file mode 100644 index 0000000000..6eff03c297 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-6-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-6-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-6 +spec: + type: ClusterIP + ports: + - port: 12006 + protocol: UDP + targetPort: 12006 + name: port-12006 + - port: 13006 + protocol: TCP + targetPort: 13006 + name: port-13006 + - port: 15006 + protocol: TCP + targetPort: 15006 + name: port-15006 + - port: 16006 + protocol: TCP + targetPort: 16006 + name: port-16006 + selector: + app: ssv-node-6 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-6 + name: ssv-node-6 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-6 + template: + metadata: + labels: + app: ssv-node-6 + spec: + containers: + - name: ssv-node-6 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12006 + name: port-12006 + protocol: UDP + hostPort: 12006 + - containerPort: 13006 + name: port-13006 + hostPort: 13006 + - containerPort: 15006 + name: port-15006 + hostPort: 15006 + - containerPort: 16006 + name: port-16006 + hostPort: 16006 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15006" + - name: SSV_API_PORT + value: "16006" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-6 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-6-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-6 + persistentVolumeClaim: + claimName: ssv-node-6 + - name: ssv-node-6-cm + configMap: + name: ssv-node-6-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-60-deployment.yml b/.k8/hetzner-stage/ssv-node-60-deployment.yml new file mode 100644 index 0000000000..ca0b3dc8cd --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-60-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-60-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-60 +spec: + type: ClusterIP + ports: + - port: 12060 + protocol: UDP + targetPort: 12060 + name: port-12060 + - port: 13060 + protocol: TCP + targetPort: 13060 + name: port-13060 + - port: 15060 + protocol: TCP + targetPort: 15060 + name: port-15060 + - port: 16060 + protocol: TCP + targetPort: 16060 + name: port-16060 + selector: + app: ssv-node-60 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-60 + name: ssv-node-60 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-60 + template: + metadata: + labels: + app: ssv-node-60 + spec: + containers: + - name: ssv-node-60 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12060 + name: port-12060 + protocol: UDP + hostPort: 12060 + - containerPort: 13060 + name: port-13060 + hostPort: 13060 + - containerPort: 15060 + name: port-15060 + hostPort: 15060 + - containerPort: 16060 + name: port-16060 + hostPort: 16060 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15060" + - name: SSV_API_PORT + value: "16060" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-60 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-60-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-60 + persistentVolumeClaim: + claimName: ssv-node-60 + - name: ssv-node-60-cm + configMap: + name: ssv-node-60-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-61-deployment.yml b/.k8/hetzner-stage/ssv-node-61-deployment.yml new file mode 100644 index 0000000000..339c551727 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-61-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-61-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-61 +spec: + type: ClusterIP + ports: + - port: 12061 + protocol: UDP + targetPort: 12061 + name: port-12061 + - port: 13061 + protocol: TCP + targetPort: 13061 + name: port-13061 + - port: 15061 + protocol: TCP + targetPort: 15061 + name: port-15061 + - port: 16061 + protocol: TCP + targetPort: 16061 + name: port-16061 + selector: + app: ssv-node-61 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-61 + name: ssv-node-61 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-61 + template: + metadata: + labels: + app: ssv-node-61 + spec: + containers: + - name: ssv-node-61 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12061 + name: port-12061 + protocol: UDP + hostPort: 12061 + - containerPort: 13061 + name: port-13061 + hostPort: 13061 + - containerPort: 15061 + name: port-15061 + hostPort: 15061 + - containerPort: 16061 + name: port-16061 + hostPort: 16061 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15061" + - name: SSV_API_PORT + value: "16061" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-61 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-61-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-61 + persistentVolumeClaim: + claimName: ssv-node-61 + - name: ssv-node-61-cm + configMap: + name: ssv-node-61-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-62-deployment.yml b/.k8/hetzner-stage/ssv-node-62-deployment.yml new file mode 100644 index 0000000000..531005618a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-62-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-62-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-62 +spec: + type: ClusterIP + ports: + - port: 12062 + protocol: UDP + targetPort: 12062 + name: port-12062 + - port: 13062 + protocol: TCP + targetPort: 13062 + name: port-13062 + - port: 15062 + protocol: TCP + targetPort: 15062 + name: port-15062 + - port: 16062 + protocol: TCP + targetPort: 16062 + name: port-16062 + selector: + app: ssv-node-62 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-62 + name: ssv-node-62 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-62 + template: + metadata: + labels: + app: ssv-node-62 + spec: + containers: + - name: ssv-node-62 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12062 + name: port-12062 + protocol: UDP + hostPort: 12062 + - containerPort: 13062 + name: port-13062 + hostPort: 13062 + - containerPort: 15062 + name: port-15062 + hostPort: 15062 + - containerPort: 16062 + name: port-16062 + hostPort: 16062 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15062" + - name: SSV_API_PORT + value: "16062" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-62 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-62-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-62 + persistentVolumeClaim: + claimName: ssv-node-62 + - name: ssv-node-62-cm + configMap: + name: ssv-node-62-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-63-deployment.yml b/.k8/hetzner-stage/ssv-node-63-deployment.yml new file mode 100644 index 0000000000..39e261a3bf --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-63-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-63-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-63 +spec: + type: ClusterIP + ports: + - port: 12063 + protocol: UDP + targetPort: 12063 + name: port-12063 + - port: 13063 + protocol: TCP + targetPort: 13063 + name: port-13063 + - port: 15063 + protocol: TCP + targetPort: 15063 + name: port-15063 + - port: 16063 + protocol: TCP + targetPort: 16063 + name: port-16063 + selector: + app: ssv-node-63 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-63 + name: ssv-node-63 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-63 + template: + metadata: + labels: + app: ssv-node-63 + spec: + containers: + - name: ssv-node-63 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12063 + name: port-12063 + protocol: UDP + hostPort: 12063 + - containerPort: 13063 + name: port-13063 + hostPort: 13063 + - containerPort: 15063 + name: port-15063 + hostPort: 15063 + - containerPort: 16063 + name: port-16063 + hostPort: 16063 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15063" + - name: SSV_API_PORT + value: "16063" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-63 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-63-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-63 + persistentVolumeClaim: + claimName: ssv-node-63 + - name: ssv-node-63-cm + configMap: + name: ssv-node-63-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-64-deployment.yml b/.k8/hetzner-stage/ssv-node-64-deployment.yml new file mode 100644 index 0000000000..709fc026fa --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-64-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-64-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-64 +spec: + type: ClusterIP + ports: + - port: 12064 + protocol: UDP + targetPort: 12064 + name: port-12064 + - port: 13064 + protocol: TCP + targetPort: 13064 + name: port-13064 + - port: 15064 + protocol: TCP + targetPort: 15064 + name: port-15064 + - port: 16064 + protocol: TCP + targetPort: 16064 + name: port-16064 + selector: + app: ssv-node-64 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-64 + name: ssv-node-64 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-64 + template: + metadata: + labels: + app: ssv-node-64 + spec: + containers: + - name: ssv-node-64 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12064 + name: port-12064 + protocol: UDP + hostPort: 12064 + - containerPort: 13064 + name: port-13064 + hostPort: 13064 + - containerPort: 15064 + name: port-15064 + hostPort: 15064 + - containerPort: 16064 + name: port-16064 + hostPort: 16064 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15064" + - name: SSV_API_PORT + value: "16064" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-64 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-64-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-64 + persistentVolumeClaim: + claimName: ssv-node-64 + - name: ssv-node-64-cm + configMap: + name: ssv-node-64-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-65-deployment.yml b/.k8/hetzner-stage/ssv-node-65-deployment.yml new file mode 100644 index 0000000000..7872f5efef --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-65-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-65-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-65 +spec: + type: ClusterIP + ports: + - port: 12065 + protocol: UDP + targetPort: 12065 + name: port-12065 + - port: 13065 + protocol: TCP + targetPort: 13065 + name: port-13065 + - port: 15065 + protocol: TCP + targetPort: 15065 + name: port-15065 + - port: 16065 + protocol: TCP + targetPort: 16065 + name: port-16065 + selector: + app: ssv-node-65 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-65 + name: ssv-node-65 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-65 + template: + metadata: + labels: + app: ssv-node-65 + spec: + containers: + - name: ssv-node-65 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12065 + name: port-12065 + protocol: UDP + hostPort: 12065 + - containerPort: 13065 + name: port-13065 + hostPort: 13065 + - containerPort: 15065 + name: port-15065 + hostPort: 15065 + - containerPort: 16065 + name: port-16065 + hostPort: 16065 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15065" + - name: SSV_API_PORT + value: "16065" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-65 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-65-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-65 + persistentVolumeClaim: + claimName: ssv-node-65 + - name: ssv-node-65-cm + configMap: + name: ssv-node-65-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-66-deployment.yml b/.k8/hetzner-stage/ssv-node-66-deployment.yml new file mode 100644 index 0000000000..8cf3d90cfe --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-66-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-66-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-66 +spec: + type: ClusterIP + ports: + - port: 12066 + protocol: UDP + targetPort: 12066 + name: port-12066 + - port: 13066 + protocol: TCP + targetPort: 13066 + name: port-13066 + - port: 15066 + protocol: TCP + targetPort: 15066 + name: port-15066 + - port: 16066 + protocol: TCP + targetPort: 16066 + name: port-16066 + selector: + app: ssv-node-66 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-66 + name: ssv-node-66 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-66 + template: + metadata: + labels: + app: ssv-node-66 + spec: + containers: + - name: ssv-node-66 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12066 + name: port-12066 + protocol: UDP + hostPort: 12066 + - containerPort: 13066 + name: port-13066 + hostPort: 13066 + - containerPort: 15066 + name: port-15066 + hostPort: 15066 + - containerPort: 16066 + name: port-16066 + hostPort: 16066 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15066" + - name: SSV_API_PORT + value: "16066" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-66 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-66-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-66 + persistentVolumeClaim: + claimName: ssv-node-66 + - name: ssv-node-66-cm + configMap: + name: ssv-node-66-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-67-deployment.yml b/.k8/hetzner-stage/ssv-node-67-deployment.yml new file mode 100644 index 0000000000..b9620a8b44 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-67-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-67-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-67 +spec: + type: ClusterIP + ports: + - port: 12067 + protocol: UDP + targetPort: 12067 + name: port-12067 + - port: 13067 + protocol: TCP + targetPort: 13067 + name: port-13067 + - port: 15067 + protocol: TCP + targetPort: 15067 + name: port-15067 + - port: 16067 + protocol: TCP + targetPort: 16067 + name: port-16067 + selector: + app: ssv-node-67 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-67 + name: ssv-node-67 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-67 + template: + metadata: + labels: + app: ssv-node-67 + spec: + containers: + - name: ssv-node-67 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12067 + name: port-12067 + protocol: UDP + hostPort: 12067 + - containerPort: 13067 + name: port-13067 + hostPort: 13067 + - containerPort: 15067 + name: port-15067 + hostPort: 15067 + - containerPort: 16067 + name: port-16067 + hostPort: 16067 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15067" + - name: SSV_API_PORT + value: "16067" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-67 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-67-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-67 + persistentVolumeClaim: + claimName: ssv-node-67 + - name: ssv-node-67-cm + configMap: + name: ssv-node-67-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-68-deployment.yml b/.k8/hetzner-stage/ssv-node-68-deployment.yml new file mode 100644 index 0000000000..b7252d580e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-68-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-68-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-68 +spec: + type: ClusterIP + ports: + - port: 12068 + protocol: UDP + targetPort: 12068 + name: port-12068 + - port: 13068 + protocol: TCP + targetPort: 13068 + name: port-13068 + - port: 15068 + protocol: TCP + targetPort: 15068 + name: port-15068 + - port: 16068 + protocol: TCP + targetPort: 16068 + name: port-16068 + selector: + app: ssv-node-68 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-68 + name: ssv-node-68 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-68 + template: + metadata: + labels: + app: ssv-node-68 + spec: + containers: + - name: ssv-node-68 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12068 + name: port-12068 + protocol: UDP + hostPort: 12068 + - containerPort: 13068 + name: port-13068 + hostPort: 13068 + - containerPort: 15068 + name: port-15068 + hostPort: 15068 + - containerPort: 16068 + name: port-16068 + hostPort: 16068 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15068" + - name: SSV_API_PORT + value: "16068" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-68 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-68-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-68 + persistentVolumeClaim: + claimName: ssv-node-68 + - name: ssv-node-68-cm + configMap: + name: ssv-node-68-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-69-deployment.yml b/.k8/hetzner-stage/ssv-node-69-deployment.yml new file mode 100644 index 0000000000..6372ddf492 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-69-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-69-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-69 +spec: + type: ClusterIP + ports: + - port: 12069 + protocol: UDP + targetPort: 12069 + name: port-12069 + - port: 13069 + protocol: TCP + targetPort: 13069 + name: port-13069 + - port: 15069 + protocol: TCP + targetPort: 15069 + name: port-15069 + - port: 16069 + protocol: TCP + targetPort: 16069 + name: port-16069 + selector: + app: ssv-node-69 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-69 + name: ssv-node-69 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-69 + template: + metadata: + labels: + app: ssv-node-69 + spec: + containers: + - name: ssv-node-69 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12069 + name: port-12069 + protocol: UDP + hostPort: 12069 + - containerPort: 13069 + name: port-13069 + hostPort: 13069 + - containerPort: 15069 + name: port-15069 + hostPort: 15069 + - containerPort: 16069 + name: port-16069 + hostPort: 16069 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15069" + - name: SSV_API_PORT + value: "16069" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-69 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-69-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-69 + persistentVolumeClaim: + claimName: ssv-node-69 + - name: ssv-node-69-cm + configMap: + name: ssv-node-69-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-7-deployment.yml b/.k8/hetzner-stage/ssv-node-7-deployment.yml new file mode 100644 index 0000000000..49101753c1 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-7-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-7-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-7 +spec: + type: ClusterIP + ports: + - port: 12007 + protocol: UDP + targetPort: 12007 + name: port-12007 + - port: 13007 + protocol: TCP + targetPort: 13007 + name: port-13007 + - port: 15007 + protocol: TCP + targetPort: 15007 + name: port-15007 + - port: 16007 + protocol: TCP + targetPort: 16007 + name: port-16007 + selector: + app: ssv-node-7 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-7 + name: ssv-node-7 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-7 + template: + metadata: + labels: + app: ssv-node-7 + spec: + containers: + - name: ssv-node-7 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12007 + name: port-12007 + protocol: UDP + hostPort: 12007 + - containerPort: 13007 + name: port-13007 + hostPort: 13007 + - containerPort: 15007 + name: port-15007 + hostPort: 15007 + - containerPort: 16007 + name: port-16007 + hostPort: 16007 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15007" + - name: SSV_API_PORT + value: "16007" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-7 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-7-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-7 + persistentVolumeClaim: + claimName: ssv-node-7 + - name: ssv-node-7-cm + configMap: + name: ssv-node-7-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-70-deployment.yml b/.k8/hetzner-stage/ssv-node-70-deployment.yml new file mode 100644 index 0000000000..d9cb6b3604 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-70-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-70-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-70 +spec: + type: ClusterIP + ports: + - port: 12070 + protocol: UDP + targetPort: 12070 + name: port-12070 + - port: 13070 + protocol: TCP + targetPort: 13070 + name: port-13070 + - port: 15070 + protocol: TCP + targetPort: 15070 + name: port-15070 + - port: 16070 + protocol: TCP + targetPort: 16070 + name: port-16070 + selector: + app: ssv-node-70 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-70 + name: ssv-node-70 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-70 + template: + metadata: + labels: + app: ssv-node-70 + spec: + containers: + - name: ssv-node-70 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12070 + name: port-12070 + protocol: UDP + hostPort: 12070 + - containerPort: 13070 + name: port-13070 + hostPort: 13070 + - containerPort: 15070 + name: port-15070 + hostPort: 15070 + - containerPort: 16070 + name: port-16070 + hostPort: 16070 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15070" + - name: SSV_API_PORT + value: "16070" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-70 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-70-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-70 + persistentVolumeClaim: + claimName: ssv-node-70 + - name: ssv-node-70-cm + configMap: + name: ssv-node-70-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-71-deployment.yml b/.k8/hetzner-stage/ssv-node-71-deployment.yml new file mode 100644 index 0000000000..cde1e7cd7e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-71-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-71-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-71 +spec: + type: ClusterIP + ports: + - port: 12071 + protocol: UDP + targetPort: 12071 + name: port-12071 + - port: 13071 + protocol: TCP + targetPort: 13071 + name: port-13071 + - port: 15071 + protocol: TCP + targetPort: 15071 + name: port-15071 + - port: 16071 + protocol: TCP + targetPort: 16071 + name: port-16071 + selector: + app: ssv-node-71 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-71 + name: ssv-node-71 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-71 + template: + metadata: + labels: + app: ssv-node-71 + spec: + containers: + - name: ssv-node-71 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12071 + name: port-12071 + protocol: UDP + hostPort: 12071 + - containerPort: 13071 + name: port-13071 + hostPort: 13071 + - containerPort: 15071 + name: port-15071 + hostPort: 15071 + - containerPort: 16071 + name: port-16071 + hostPort: 16071 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15071" + - name: SSV_API_PORT + value: "16071" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-71 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-71-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-71 + persistentVolumeClaim: + claimName: ssv-node-71 + - name: ssv-node-71-cm + configMap: + name: ssv-node-71-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-72-deployment.yml b/.k8/hetzner-stage/ssv-node-72-deployment.yml new file mode 100644 index 0000000000..11b639df29 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-72-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-72-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-72 +spec: + type: ClusterIP + ports: + - port: 12072 + protocol: UDP + targetPort: 12072 + name: port-12072 + - port: 13072 + protocol: TCP + targetPort: 13072 + name: port-13072 + - port: 15072 + protocol: TCP + targetPort: 15072 + name: port-15072 + - port: 16072 + protocol: TCP + targetPort: 16072 + name: port-16072 + selector: + app: ssv-node-72 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-72 + name: ssv-node-72 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-72 + template: + metadata: + labels: + app: ssv-node-72 + spec: + containers: + - name: ssv-node-72 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12072 + name: port-12072 + protocol: UDP + hostPort: 12072 + - containerPort: 13072 + name: port-13072 + hostPort: 13072 + - containerPort: 15072 + name: port-15072 + hostPort: 15072 + - containerPort: 16072 + name: port-16072 + hostPort: 16072 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15072" + - name: SSV_API_PORT + value: "16072" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-72 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-72-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-72 + persistentVolumeClaim: + claimName: ssv-node-72 + - name: ssv-node-72-cm + configMap: + name: ssv-node-72-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-8-deployment.yml b/.k8/hetzner-stage/ssv-node-8-deployment.yml new file mode 100644 index 0000000000..a08bd81e24 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-8-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-8-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-8 +spec: + type: ClusterIP + ports: + - port: 12008 + protocol: UDP + targetPort: 12008 + name: port-12008 + - port: 13008 + protocol: TCP + targetPort: 13008 + name: port-13008 + - port: 15008 + protocol: TCP + targetPort: 15008 + name: port-15008 + - port: 16008 + protocol: TCP + targetPort: 16008 + name: port-16008 + selector: + app: ssv-node-8 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-8 + name: ssv-node-8 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-8 + template: + metadata: + labels: + app: ssv-node-8 + spec: + containers: + - name: ssv-node-8 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12008 + name: port-12008 + protocol: UDP + hostPort: 12008 + - containerPort: 13008 + name: port-13008 + hostPort: 13008 + - containerPort: 15008 + name: port-15008 + hostPort: 15008 + - containerPort: 16008 + name: port-16008 + hostPort: 16008 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15008" + - name: SSV_API_PORT + value: "16008" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-8 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-8-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-8 + persistentVolumeClaim: + claimName: ssv-node-8 + - name: ssv-node-8-cm + configMap: + name: ssv-node-8-cm + hostNetwork: true diff --git a/.k8/stage/ssv-node-9-deployment.yml b/.k8/hetzner-stage/ssv-node-9-deployment.yml similarity index 70% rename from .k8/stage/ssv-node-9-deployment.yml rename to .k8/hetzner-stage/ssv-node-9-deployment.yml index 37098e1a0a..1dc1e6c2a3 100644 --- a/.k8/stage/ssv-node-9-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-9-deployment.yml @@ -36,7 +36,7 @@ metadata: name: ssv-node-9 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: @@ -47,18 +47,10 @@ spec: labels: app: ssv-node-9 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - name: ssv-node-9 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -106,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -128,34 +120,14 @@ spec: name: ssv-node-9 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-9 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 + name: ssv-node-9-cm + imagePullSecrets: + - name: ecr-repo volumes: - name: ssv-node-9 persistentVolumeClaim: claimName: ssv-node-9 - - name: ssv-cm-validator-options-9 + - name: ssv-node-9-cm configMap: - name: ssv-cm-validator-options-9 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-9-cm hostNetwork: true diff --git a/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml index 7f2616196a..49f3ad5f29 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16017" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-1 diff --git a/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml 
b/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml
index 2484e7c214..8cfa6f3d6b 100644
--- a/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml
+++ b/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml
@@ -110,6 +110,8 @@ spec:
             value: "16018"
           - name: ENABLE_PROFILE
             value: "true"
+          - name: BUILDER_PROPOSALS
+            value: "true"
       volumeMounts:
       - mountPath: /data
         name: ssv-node-mainnet-2
diff --git a/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml
index 2b0b836915..1b197f8f51 100644
--- a/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml
+++ b/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml
@@ -110,6 +110,8 @@ spec:
             value: "16019"
           - name: ENABLE_PROFILE
             value: "true"
+          - name: BUILDER_PROPOSALS
+            value: "true"
       volumeMounts:
       - mountPath: /data
         name: ssv-node-mainnet-3
diff --git a/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml
index 048e021889..5e83a864eb 100644
--- a/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml
+++ b/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml
@@ -110,6 +110,8 @@ spec:
             value: "16020"
           - name: ENABLE_PROFILE
             value: "true"
+          - name: BUILDER_PROPOSALS
+            value: "true"
       volumeMounts:
       - mountPath: /data
         name: ssv-node-mainnet-4
diff --git a/.k8/stage/scripts/deploy-holesky-exporters.sh b/.k8/stage/scripts/deploy-holesky-exporters.sh
new file mode 100755
index 0000000000..9a899ef3d3
--- /dev/null
+++ b/.k8/stage/scripts/deploy-holesky-exporters.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+    echo "Please provide DOCKERREPO"
+    exit 1
+fi
+
+if [[ -z $2 ]]; then
+    echo "Please provide IMAGETAG"
+    exit 1
+fi
+
+if [[ -z $3 ]]; then
+    echo "Please provide NAMESPACE"
+    exit 1
+fi
+
+if [[ -z $4 ]]; then
+    echo "Please provide number of replicas"
+    exit 1
+fi
+
+if [[ -z $5 ]]; then
+    echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+    exit 1
+fi
+
+if [[ -z $6 ]]; then
+    echo "Please provide k8s context"
+    exit 1
+fi
+
+if [[ -z $7 ]]; then
+    echo "Please provide domain suffix"
+    exit 1
+fi
+
+if [[ -z ${8} ]]; then
+    echo "Please provide k8s api version"
+    exit 1
+fi
+
+if [[ -z ${9} ]]; then
+    echo "Please provide exporter cpu limit"
+    exit 1
+fi
+
+if [[ -z ${10} ]]; then
+    echo "Please provide exporter mem limit"
+    exit 1
+fi
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+EXPORTER_CPU_LIMIT=$9
+EXPORTER_MEM_LIMIT=${10}
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $EXPORTER_CPU_LIMIT
+echo $EXPORTER_MEM_LIMIT
+
+# create namespace if not exists
+if !
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-exporter-holesky.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ + -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/stage/ssv-node-v2-1-deployment.yml b/.k8/stage/ssv-node-v2-1-deployment.yml index 340d2a3419..a62ac399cb 100644 --- a/.k8/stage/ssv-node-v2-1-deployment.yml +++ b/.k8/stage/ssv-node-v2-1-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-2-deployment.yml b/.k8/stage/ssv-node-v2-2-deployment.yml index ccb63c8cde..bc728de072 100644 --- a/.k8/stage/ssv-node-v2-2-deployment.yml +++ b/.k8/stage/ssv-node-v2-2-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12002" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-3-deployment.yml b/.k8/stage/ssv-node-v2-3-deployment.yml index d30d7648a5..81ca74db36 100644 --- a/.k8/stage/ssv-node-v2-3-deployment.yml +++ b/.k8/stage/ssv-node-v2-3-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12003" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-4-deployment.yml b/.k8/stage/ssv-node-v2-4-deployment.yml index de012b24f7..a1b98d28a1 100644 --- a/.k8/stage/ssv-node-v2-4-deployment.yml +++ b/.k8/stage/ssv-node-v2-4-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12004" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-5-deployment.yml b/.k8/stage/ssv-node-v2-5-deployment.yml index 8e0a8436a8..c7446bafdf 100644 --- a/.k8/stage/ssv-node-v2-5-deployment.yml +++ b/.k8/stage/ssv-node-v2-5-deployment.yml @@ -104,9 +104,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -120,7 +120,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-v2-5 diff --git a/.k8/stage/ssv-node-v2-6-deployment.yml b/.k8/stage/ssv-node-v2-6-deployment.yml 
index 1fddf2a098..b56673db9e 100644 --- a/.k8/stage/ssv-node-v2-6-deployment.yml +++ b/.k8/stage/ssv-node-v2-6-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-7-deployment.yml b/.k8/stage/ssv-node-v2-7-deployment.yml index b4a4b93e72..4e61986511 100644 --- a/.k8/stage/ssv-node-v2-7-deployment.yml +++ b/.k8/stage/ssv-node-v2-7-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -122,7 +122,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-v2-7 diff --git a/.k8/stage/ssv-node-v2-8-deployment.yml b/.k8/stage/ssv-node-v2-8-deployment.yml index af3607ba5c..745fb3a3ea 100644 --- a/.k8/stage/ssv-node-v2-8-deployment.yml +++ b/.k8/stage/ssv-node-v2-8-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/Dockerfile b/Dockerfile index 0faa9e340e..44c362dcfc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,6 @@ RUN apt-get update && \ git=1:2.39.2-1.1 \ zip=3.0-13 \ unzip=6.0-28 \ - wget=1.21.3-1+b2 \ g++=4:12.2.0-3 \ gcc-aarch64-linux-gnu=4:12.2.0-3 \ bzip2=1.0.8-5+b1 \ @@ -61,7 +60,7 @@ RUN apk -v --update add \ ca-certificates=20230506-r0 \ bash=5.2.15-r5 \ make=4.4.1-r1 \ - bind-tools=9.18.16-r0 && \ + bind-tools=9.18.19-r0 && \ rm /var/cache/apk/* COPY --from=builder /go/bin/ssvnode /go/bin/ssvnode diff --git a/beacon/goclient/goclient.go b/beacon/goclient/goclient.go index 8fe1216155..de3ed18c0d 100644 --- a/beacon/goclient/goclient.go +++ b/beacon/goclient/goclient.go @@ -20,7 +20,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" ) @@ -122,6 +122,7 @@ type Client interface { eth2client.BlindedBeaconBlockProposalProvider eth2client.BlindedBeaconBlockSubmitter eth2client.ValidatorRegistrationsSubmitter + eth2client.VoluntaryExitSubmitter } type NodeClientProvider interface { @@ -147,7 +148,7 @@ type goClient struct { } // New init new client and go-client instance -func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.OperatorID, slotTicker slot_ticker.Ticker) (beaconprotocol.BeaconNode, error) { +func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.OperatorID, slotTickerProvider slotticker.Provider) (beaconprotocol.BeaconNode, error) { logger.Info("consensus client: connecting", fields.Address(opt.BeaconNodeAddr), fields.Network(string(opt.Network.BeaconNetwork))) httpClient, err := http.New(opt.Context, @@ -161,9 +162,6 @@ func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.Op return nil, errors.WithMessage(err, "failed to create http client") } - tickerChan := make(chan phase0.Slot, 32) - 
slotTicker.Subscribe(tickerChan) - client := &goClient{ log: logger, ctx: opt.Context, @@ -190,7 +188,7 @@ func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.Op ) // Start registration submitter. - go client.registrationSubmitter(tickerChan) + go client.registrationSubmitter(slotTickerProvider) return client, nil } diff --git a/beacon/goclient/proposer.go b/beacon/goclient/proposer.go index cb48d5e33c..38d7f4f565 100644 --- a/beacon/goclient/proposer.go +++ b/beacon/goclient/proposer.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/slotticker" ) const ( @@ -230,9 +231,15 @@ func (gc *goClient) createValidatorRegistration(pubkey []byte, feeRecipient bell return signedReg } -func (gc *goClient) registrationSubmitter(slots <-chan phase0.Slot) { - for currentSlot := range slots { - gc.submitRegistrationsFromCache(currentSlot) +func (gc *goClient) registrationSubmitter(slotTickerProvider slotticker.Provider) { + ticker := slotTickerProvider() + for { + select { + case <-gc.ctx.Done(): + return + case <-ticker.Next(): + gc.submitRegistrationsFromCache(ticker.Slot()) + } } } diff --git a/beacon/goclient/voluntary_exit.go b/beacon/goclient/voluntary_exit.go new file mode 100644 index 0000000000..bb2dfaa62f --- /dev/null +++ b/beacon/goclient/voluntary_exit.go @@ -0,0 +1,10 @@ +package goclient + +import ( + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/pkg/errors" +) + +func (gc *goClient) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit, sig phase0.BLSSignature) error { + return errors.New("not implemented") +} diff --git a/cli/operator/node.go b/cli/operator/node.go index 4dd14f558a..9c20e2fda0 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -19,7 +19,6 @@ import ( "github.com/bloxapp/ssv/api/handlers" apiserver "github.com/bloxapp/ssv/api/server" - "github.com/bloxapp/ssv/beacon/goclient" global_config "github.com/bloxapp/ssv/cli/config" "github.com/bloxapp/ssv/ekm" @@ -34,6 +33,7 @@ import ( ssv_identity "github.com/bloxapp/ssv/identity" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/migrations" "github.com/bloxapp/ssv/monitoring/metrics" "github.com/bloxapp/ssv/monitoring/metricsreporter" @@ -42,9 +42,11 @@ import ( "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/nodeprobe" "github.com/bloxapp/ssv/operator" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/slotticker" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validatorsmap" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -60,6 +62,10 @@ type KeyStore struct { PasswordFile string `yaml:"PasswordFile" env:"PASSWORD_FILE" env-description:"Password for operator private key file decryption"` } +type MessageValidation struct { + VerifySignatures bool `yaml:"VerifySignatures" env:"MESSAGE_VALIDATION_VERIFY_SIGNATURES" env-default:"false" env-description:"Experimental feature to verify signatures in pubsub's message validation instead of in consensus protocol."` +} + type config struct { global_config.GlobalConfig `yaml:"global"` DBOptions basedb.Options `yaml:"db"` @@ -72,13 +78,11 @@ type 
config struct { MetricsAPIPort int `yaml:"MetricsAPIPort" env:"METRICS_API_PORT" env-description:"Port to listen on for the metrics API."` EnableProfile bool `yaml:"EnableProfile" env:"ENABLE_PROFILE" env-description:"flag that indicates whether go profiling tools are enabled"` NetworkPrivateKey string `yaml:"NetworkPrivateKey" env:"NETWORK_PRIVATE_KEY" env-description:"private key for network identity"` - - WsAPIPort int `yaml:"WebSocketAPIPort" env:"WS_API_PORT" env-description:"Port to listen on for the websocket API."` - WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` - - SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` - - LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` + WsAPIPort int `yaml:"WebSocketAPIPort" env:"WS_API_PORT" env-description:"Port to listen on for the websocket API."` + WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` + SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` + LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` + MessageValidation MessageValidation `yaml:"MessageValidation"` } var cfg config @@ -97,6 +101,11 @@ var StartNodeCmd = &cobra.Command{ log.Fatal("could not create logger", err) } defer logging.CapturePanic(logger) + + metricsReporter := metricsreporter.New( + metricsreporter.WithLogger(logger), + ) + networkConfig, err := setupSSVNetwork(logger) if err != nil { logger.Fatal("could not setup network", zap.Error(err)) @@ -128,28 +137,16 @@ var StartNodeCmd = &cobra.Command{ return currentEpoch >= cfg.P2pNetworkConfig.PermissionedActivateEpoch && currentEpoch < cfg.P2pNetworkConfig.PermissionedDeactivateEpoch } - cfg.P2pNetworkConfig.Permissioned = permissioned - cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) - cfg.P2pNetworkConfig.NodeStorage = nodeStorage - cfg.P2pNetworkConfig.OperatorID = format.OperatorID(operatorData.PublicKey) - cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode - cfg.P2pNetworkConfig.Network = networkConfig - - p2pNetwork := setupP2P(logger, db) - - slotTicker := slot_ticker.NewTicker(cmd.Context(), networkConfig) - - metricsReporter := metricsreporter.New( - metricsreporter.WithLogger(logger), - ) + slotTickerProvider := func() slotticker.SlotTicker { + return slotticker.New(networkConfig) + } cfg.ConsensusClient.Context = cmd.Context() - cfg.ConsensusClient.Graffiti = []byte("SSV.Network") cfg.ConsensusClient.GasLimit = spectypes.DefaultGasLimit cfg.ConsensusClient.Network = networkConfig.Beacon.GetNetwork() - consensusClient := setupConsensusClient(logger, operatorData.ID, slotTicker) + consensusClient := setupConsensusClient(logger, operatorData.ID, slotTickerProvider) executionClient, err := executionclient.New( cmd.Context(), @@ -166,6 +163,36 @@ var StartNodeCmd = &cobra.Command{ logger.Fatal("could not connect to execution client", zap.Error(err)) } + cfg.P2pNetworkConfig.Permissioned = permissioned + cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) 
+ cfg.P2pNetworkConfig.NodeStorage = nodeStorage + cfg.P2pNetworkConfig.OperatorID = format.OperatorID(operatorData.PublicKey) + cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode + cfg.P2pNetworkConfig.Network = networkConfig + + validatorsMap := validatorsmap.New(cmd.Context()) + + dutyStore := dutystore.New() + cfg.SSVOptions.DutyStore = dutyStore + + messageValidator := validation.NewMessageValidator( + networkConfig, + validation.WithShareStorage(nodeStorage.Shares()), + validation.WithLogger(logger), + validation.WithMetrics(metricsReporter), + validation.WithDutyStore(dutyStore), + validation.WithOwnOperatorID(operatorData.ID), + validation.WithSignatureVerification(cfg.MessageValidation.VerifySignatures), + ) + + cfg.P2pNetworkConfig.Metrics = metricsReporter + cfg.P2pNetworkConfig.MessageValidator = messageValidator + cfg.SSVOptions.ValidatorOptions.MessageValidator = messageValidator + // if signature check is enabled in message validation then it's disabled in validator controller and vice versa + cfg.SSVOptions.ValidatorOptions.VerifySignatures = !cfg.MessageValidation.VerifySignatures + + p2pNetwork := setupP2P(logger, db) + cfg.SSVOptions.Context = cmd.Context() cfg.SSVOptions.DB = db cfg.SSVOptions.BeaconNode = consensusClient @@ -178,6 +205,7 @@ var StartNodeCmd = &cobra.Command{ cfg.SSVOptions.ValidatorOptions.Network = p2pNetwork cfg.SSVOptions.ValidatorOptions.Beacon = consensusClient cfg.SSVOptions.ValidatorOptions.KeyManager = keyManager + cfg.SSVOptions.ValidatorOptions.ValidatorsMap = validatorsMap cfg.SSVOptions.ValidatorOptions.ShareEncryptionKeyProvider = nodeStorage.GetPrivateKey cfg.SSVOptions.ValidatorOptions.OperatorData = operatorData @@ -209,12 +237,12 @@ var StartNodeCmd = &cobra.Command{ cfg.SSVOptions.ValidatorOptions.StorageMap = storageMap cfg.SSVOptions.ValidatorOptions.Metrics = metricsReporter + cfg.SSVOptions.Metrics = metricsReporter validatorCtrl := validator.NewController(logger, cfg.SSVOptions.ValidatorOptions) cfg.SSVOptions.ValidatorController = validatorCtrl - cfg.SSVOptions.Metrics = metricsReporter - operatorNode = operator.New(logger, cfg.SSVOptions, slotTicker) + operatorNode = operator.New(logger, cfg.SSVOptions, slotTickerProvider) if cfg.MetricsAPIPort > 0 { go startMetricsHandler(cmd.Context(), logger, db, metricsReporter, cfg.MetricsAPIPort, cfg.EnableProfile) @@ -477,10 +505,7 @@ func setupSSVNetwork(logger *zap.Logger) (networkconfig.NetworkConfig, error) { return networkConfig, nil } -func setupP2P( - logger *zap.Logger, - db basedb.Database, -) network.P2PNetwork { +func setupP2P(logger *zap.Logger, db basedb.Database) network.P2PNetwork { istore := ssv_identity.NewIdentityStore(db) netPrivKey, err := istore.SetupNetworkKey(logger, cfg.NetworkPrivateKey) if err != nil { @@ -494,9 +519,9 @@ func setupP2P( func setupConsensusClient( logger *zap.Logger, operatorID spectypes.OperatorID, - slotTicker slot_ticker.Ticker, + slotTickerProvider slotticker.Provider, ) beaconprotocol.BeaconNode { - cl, err := goclient.New(logger, cfg.ConsensusClient, operatorID, slotTicker) + cl, err := goclient.New(logger, cfg.ConsensusClient, operatorID, slotTickerProvider) if err != nil { logger.Fatal("failed to create beacon go-client", zap.Error(err), fields.Address(cfg.ConsensusClient.BeaconNodeAddr)) diff --git a/docs/OPERATOR_GETTING_STARTED.md b/docs/OPERATOR_GETTING_STARTED.md index f46fdd08b1..d99c30ae52 100644 --- a/docs/OPERATOR_GETTING_STARTED.md +++ b/docs/OPERATOR_GETTING_STARTED.md @@ -148,7 +148,8 @@ 
 OperatorPrivateKey: LS0tLS...
 
 ### 6. Start SSV Node in Docker
 
-Run the docker image in the same folder you created the `config.yaml`:
+Before starting, make sure the machine's clock is synced with NTP servers.
+Then run the Docker image in the same folder where you created `config.yaml`:
 
 ```shell
 $ docker run -d --restart unless-stopped --name=ssv_node -e CONFIG_PATH=./config.yaml -p 13001:13001 -p 12001:12001/udp -v $(pwd)/config.yaml:/config.yaml -v $(pwd):/data --log-opt max-size=500m --log-opt max-file=10 -it 'bloxstaking/ssv-node:latest' make BUILD_PATH=/go/bin/ssvnode start-node \
diff --git a/ekm/eth_key_manager_signer.go b/ekm/eth_key_manager_signer.go
index 21c663a5b1..6d4f098e00 100644
--- a/ekm/eth_key_manager_signer.go
+++ b/ekm/eth_key_manager_signer.go
@@ -29,9 +29,16 @@ import (
 	"github.com/bloxapp/ssv/storage/basedb"
 )
 
-// minimal att&block epoch/slot distance to protect slashing
-var minimalAttSlashingProtectionEpochDistance = phase0.Epoch(0)
-var minimalBlockSlashingProtectionSlotDistance = phase0.Slot(0)
+const (
+	// minSPAttestationEpochGap is the minimum epoch distance used for slashing protection in attestations.
+	// It defines the smallest allowable gap between the source and target epochs in an existing attestation
+	// and those in a new attestation, helping to prevent slashable offenses.
+	minSPAttestationEpochGap = phase0.Epoch(0)
+	// minSPProposalSlotGap is the minimum slot distance used for slashing protection in block proposals.
+	// It defines the smallest allowable gap between the current slot and the slot of a new block proposal,
+	// helping to prevent slashable offenses.
+	minSPProposalSlotGap = phase0.Slot(0)
+)
 
 type ethKeyManagerSigner struct {
 	wallet            core.Wallet
@@ -43,9 +50,17 @@ type ethKeyManagerSigner struct {
 	builderProposals  bool
 }
 
+// StorageProvider provides the underlying KeyManager storage.
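+// It exposes account listing plus the highest-attestation and highest-proposal
+// records used for slashing protection, along with BumpSlashingProtection to
+// raise those records to safe minimums (see below).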
+type StorageProvider interface { + ListAccounts() ([]core.ValidatorAccount, error) + RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) + RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) + BumpSlashingProtection(pubKey []byte) error +} + // NewETHKeyManagerSigner returns a new instance of ethKeyManagerSigner func NewETHKeyManagerSigner(logger *zap.Logger, db basedb.Database, network networkconfig.NetworkConfig, builderProposals bool, encryptionKey string) (spectypes.KeyManager, error) { - signerStore := NewSignerStorage(db, network.Beacon.GetNetwork(), logger) + signerStore := NewSignerStorage(db, network.Beacon, logger) if encryptionKey != "" { err := signerStore.SetEncryptionKey(encryptionKey) if err != nil { @@ -85,6 +100,18 @@ func NewETHKeyManagerSigner(logger *zap.Logger, db basedb.Database, network netw }, nil } +func (km *ethKeyManagerSigner) ListAccounts() ([]core.ValidatorAccount, error) { + return km.storage.ListAccounts() +} + +func (km *ethKeyManagerSigner) RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) { + return km.storage.RetrieveHighestAttestation(pubKey) +} + +func (km *ethKeyManagerSigner) RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) { + return km.storage.RetrieveHighestProposal(pubKey) +} + func (km *ethKeyManagerSigner) SignBeaconObject(obj ssz.HashRoot, domain phase0.Domain, pk []byte, domainType phase0.DomainType) (spectypes.Signature, [32]byte, error) { sig, rootSlice, err := km.signBeaconObject(obj, domain, pk, domainType) if err != nil { @@ -260,9 +287,8 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return errors.Wrap(err, "could not check share existence") } if acc == nil { - currentSlot := km.storage.Network().EstimatedCurrentSlot() - if err := km.saveMinimalSlashingProtection(shareKey.GetPublicKey().Serialize(), currentSlot); err != nil { - return errors.Wrap(err, "could not save minimal slashing protection") + if err := km.BumpSlashingProtection(shareKey.GetPublicKey().Serialize()); err != nil { + return errors.Wrap(err, "could not bump slashing protection") } if err := km.saveShare(shareKey); err != nil { return errors.Wrap(err, "could not save share") @@ -272,23 +298,6 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return nil } -func (km *ethKeyManagerSigner) saveMinimalSlashingProtection(pk []byte, currentSlot phase0.Slot) error { - currentEpoch := km.storage.Network().EstimatedEpochAtSlot(currentSlot) - highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance - highestSource := highestTarget - 1 - highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance - - minAttData := minimalAttProtectionData(highestSource, highestTarget) - - if err := km.storage.SaveHighestAttestation(pk, minAttData); err != nil { - return errors.Wrapf(err, "could not save minimal highest attestation for %s", string(pk)) - } - if err := km.storage.SaveHighestProposal(pk, highestProposal); err != nil { - return errors.Wrapf(err, "could not save minimal highest proposal for %s", string(pk)) - } - return nil -} - func (km *ethKeyManagerSigner) RemoveShare(pubKey string) error { km.walletLock.Lock() defer km.walletLock.Unlock() @@ -315,28 +324,110 @@ func (km *ethKeyManagerSigner) RemoveShare(pubKey string) error { return nil } -func (km *ethKeyManagerSigner) saveShare(shareKey *bls.SecretKey) error { - key, err := core.NewHDKeyFromPrivateKey(shareKey.Serialize(), "") +// BumpSlashingProtection 
updates the slashing protection data for a given public key. +func (km *ethKeyManagerSigner) BumpSlashingProtection(pubKey []byte) error { + currentSlot := km.storage.BeaconNetwork().EstimatedCurrentSlot() + + // Update highest attestation data for slashing protection. + if err := km.updateHighestAttestation(pubKey, currentSlot); err != nil { + return err + } + + // Update highest proposal data for slashing protection. + if err := km.updateHighestProposal(pubKey, currentSlot); err != nil { + return err + } + + return nil +} + +// updateHighestAttestation updates the highest attestation data for slashing protection. +func (km *ethKeyManagerSigner) updateHighestAttestation(pubKey []byte, slot phase0.Slot) error { + // Retrieve the highest attestation data stored for the given public key. + retrievedHighAtt, found, err := km.RetrieveHighestAttestation(pubKey) if err != nil { - return errors.Wrap(err, "could not generate HDKey") + return fmt.Errorf("could not retrieve highest attestation: %w", err) } - account := wallets.NewValidatorAccount("", key, nil, "", nil) - if err := km.wallet.AddValidatorAccount(account); err != nil { - return errors.Wrap(err, "could not save new account") + + currentEpoch := km.storage.BeaconNetwork().EstimatedEpochAtSlot(slot) + minimalSP := km.computeMinimalAttestationSP(currentEpoch) + + // Check if the retrieved highest attestation data is valid and not outdated. + if found && retrievedHighAtt != nil { + if retrievedHighAtt.Source.Epoch >= minimalSP.Source.Epoch || retrievedHighAtt.Target.Epoch >= minimalSP.Target.Epoch { + return nil + } } + + // At this point, either the retrieved attestation data was not found, or it was outdated. + // In either case, we update it to the minimal slashing protection data. + if err := km.storage.SaveHighestAttestation(pubKey, minimalSP); err != nil { + return fmt.Errorf("could not save highest attestation: %w", err) + } + + return nil +} + +// updateHighestProposal updates the highest proposal slot for slashing protection. +func (km *ethKeyManagerSigner) updateHighestProposal(pubKey []byte, slot phase0.Slot) error { + // Retrieve the highest proposal slot stored for the given public key. + retrievedHighProp, found, err := km.RetrieveHighestProposal(pubKey) + if err != nil { + return fmt.Errorf("could not retrieve highest proposal: %w", err) + } + + minimalSPSlot := km.computeMinimalProposerSP(slot) + + // Check if the retrieved highest proposal slot is valid and not outdated. + if found && retrievedHighProp != 0 { + if retrievedHighProp >= minimalSPSlot { + return nil + } + } + + // At this point, either the retrieved proposal slot was not found, or it was outdated. + // In either case, we update it to the minimal slashing protection slot. + if err := km.storage.SaveHighestProposal(pubKey, minimalSPSlot); err != nil { + return fmt.Errorf("could not save highest proposal: %w", err) + } + return nil } -func minimalAttProtectionData(source, target phase0.Epoch) *phase0.AttestationData { +// computeMinimalAttestationSP calculates the minimal safe attestation data for slashing protection. +// It takes the current epoch as an argument and returns an AttestationData object with the minimal safe source and target epochs. +func (km *ethKeyManagerSigner) computeMinimalAttestationSP(epoch phase0.Epoch) *phase0.AttestationData { + // Calculate the highest safe target epoch based on the current epoch and a predefined minimum distance. 
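+	// Example: with minSPAttestationEpochGap = 0 and current epoch E, the
+	// minimal safe record becomes source = E-1, target = E; BumpSlashingProtection
+	// above only persists these values when the stored record is older.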
+ highestTarget := epoch + minSPAttestationEpochGap + // The highest safe source epoch is one less than the highest target epoch. + highestSource := highestTarget - 1 + + // Return a new AttestationData object with the calculated source and target epochs. return &phase0.AttestationData{ - BeaconBlockRoot: [32]byte{}, Source: &phase0.Checkpoint{ - Epoch: source, - Root: [32]byte{}, + Epoch: highestSource, }, Target: &phase0.Checkpoint{ - Epoch: target, - Root: [32]byte{}, + Epoch: highestTarget, }, } } + +// computeMinimalProposerSP calculates the minimal safe slot for a block proposal to avoid slashing. +// It takes the current slot as an argument and returns the minimal safe slot. +func (km *ethKeyManagerSigner) computeMinimalProposerSP(slot phase0.Slot) phase0.Slot { + // Calculate the highest safe proposal slot based on the current slot and a predefined minimum distance. + return slot + minSPProposalSlotGap +} + +func (km *ethKeyManagerSigner) saveShare(shareKey *bls.SecretKey) error { + key, err := core.NewHDKeyFromPrivateKey(shareKey.Serialize(), "") + if err != nil { + return errors.Wrap(err, "could not generate HDKey") + } + account := wallets.NewValidatorAccount("", key, nil, "", nil) + if err := km.wallet.AddValidatorAccount(account); err != nil { + return errors.Wrap(err, "could not save new account") + } + return nil +} diff --git a/ekm/signer_key_manager_test.go b/ekm/signer_key_manager_test.go index 4efe2c4fb3..65cf5df24c 100644 --- a/ekm/signer_key_manager_test.go +++ b/ekm/signer_key_manager_test.go @@ -7,26 +7,25 @@ import ( "encoding/hex" "testing" - "github.com/bloxapp/eth2-key-manager/core" - "github.com/bloxapp/eth2-key-manager/wallets/hd" - "github.com/bloxapp/ssv/utils/rsaencryption" - - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/storage/basedb" - "github.com/attestantio/go-eth2-client/spec/altair" "github.com/attestantio/go-eth2-client/spec/bellatrix" "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/bloxapp/eth2-key-manager/core" + "github.com/bloxapp/eth2-key-manager/wallets/hd" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/utils/threshold" "github.com/herumi/bls-eth-go-binary/bls" + "github.com/pkg/errors" "github.com/prysmaticlabs/go-bitfield" "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/utils" + "github.com/bloxapp/ssv/utils/rsaencryption" + "github.com/bloxapp/ssv/utils/threshold" ) const ( @@ -36,7 +35,7 @@ const ( pk2Str = "8796fafa576051372030a75c41caafea149e4368aebaca21c9f90d9974b3973d5cee7d7874e4ec9ec59fb2c8945b3e01" ) -func testKeyManager(t *testing.T) spectypes.KeyManager { +func testKeyManager(t *testing.T, network *networkconfig.NetworkConfig) spectypes.KeyManager { threshold.Init() logger := logging.TestLogger(t) @@ -44,7 +43,14 @@ func testKeyManager(t *testing.T) spectypes.KeyManager { db, err := getBaseStorage(logger) require.NoError(t, err) - km, err := NewETHKeyManagerSigner(logger, db, networkconfig.TestNetwork, true, "") + if network == nil { + network = &networkconfig.NetworkConfig{ + Beacon: utils.SetupMockBeaconNetwork(t, nil), + Domain: networkconfig.TestNetwork.Domain, + } + } + + km, err := NewETHKeyManagerSigner(logger, db, *network, true, "") require.NoError(t, err) sk1 := &bls.SecretKey{} @@ 
-120,7 +126,7 @@ func TestEncryptedKeyManager(t *testing.T) { } func TestSlashing(t *testing.T) { - km := testKeyManager(t) + km := testKeyManager(t, nil) sk1 := &bls.SecretKey{} require.NoError(t, sk1.SetHexString(sk1Str)) @@ -129,12 +135,12 @@ func TestSlashing(t *testing.T) { currentSlot := km.(*ethKeyManagerSigner).storage.Network().EstimatedCurrentSlot() currentEpoch := km.(*ethKeyManagerSigner).storage.Network().EstimatedEpochAtSlot(currentSlot) - highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance + 1 + highestTarget := currentEpoch + minSPAttestationEpochGap + 1 highestSource := highestTarget - 1 - highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance + 1 + highestProposal := currentSlot + minSPProposalSlotGap + 1 attestationData := &phase0.AttestationData{ - Slot: 30, + Slot: currentSlot, Index: 1, BeaconBlockRoot: [32]byte{1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2}, Source: &phase0.Checkpoint{ @@ -272,7 +278,7 @@ func TestSlashing(t *testing.T) { } func TestSlashing_Attestation(t *testing.T) { - km := testKeyManager(t) + km := testKeyManager(t, nil) var secretKeys [4]*bls.SecretKey for i := range secretKeys { @@ -280,8 +286,7 @@ func TestSlashing_Attestation(t *testing.T) { secretKeys[i].SetByCSPRNG() // Equivalent to AddShare but with a custom slot for minimal slashing protection. - minimalSlot := phase0.Slot(64) - err := km.(*ethKeyManagerSigner).saveMinimalSlashingProtection(secretKeys[i].GetPublicKey().Serialize(), minimalSlot) + err := km.(*ethKeyManagerSigner).BumpSlashingProtection(secretKeys[i].GetPublicKey().Serialize()) require.NoError(t, err) err = km.(*ethKeyManagerSigner).saveShare(secretKeys[i]) require.NoError(t, err) @@ -317,6 +322,12 @@ func TestSlashing_Attestation(t *testing.T) { require.NoError(t, err, "expected no slashing") require.NotZero(t, sig, "expected non-zero signature") require.NotZero(t, root, "expected non-zero root") + + highAtt, found, err := km.(*ethKeyManagerSigner).storage.RetrieveHighestAttestation(sk.GetPublicKey().Serialize()) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, attestation.Source.Epoch, highAtt.Source.Epoch) + require.Equal(t, attestation.Target.Epoch, highAtt.Target.Epoch) } } @@ -360,7 +371,7 @@ func TestSlashing_Attestation(t *testing.T) { func TestSignRoot(t *testing.T) { require.NoError(t, bls.Init(bls.BLS12_381)) - km := testKeyManager(t) + km := testKeyManager(t, nil) t.Run("pk 1", func(t *testing.T) { pk := &bls.PublicKey{} diff --git a/ekm/signer_storage.go b/ekm/signer_storage.go index 5991e6f321..fc8eadd62e 100644 --- a/ekm/signer_storage.go +++ b/ekm/signer_storage.go @@ -47,17 +47,19 @@ type Storage interface { SetEncryptionKey(newKey string) error ListAccountsTxn(r basedb.Reader) ([]core.ValidatorAccount, error) SaveAccountTxn(rw basedb.ReadWriter, account core.ValidatorAccount) error + + BeaconNetwork() beacon.BeaconNetwork } type storage struct { db basedb.Database - network beacon.Network + network beacon.BeaconNetwork encryptionKey []byte logger *zap.Logger // struct logger is used because core.Storage does not support passing a logger lock sync.RWMutex } -func NewSignerStorage(db basedb.Database, network beacon.Network, logger *zap.Logger) Storage { +func NewSignerStorage(db basedb.Database, network beacon.BeaconNetwork, logger *zap.Logger) Storage { return &storage{ db: db, network: network, @@ -87,7 +89,7 @@ func (s *storage) DropRegistryData() error { } func (s *storage) objPrefix(obj 
string) []byte { - return []byte(string(s.network.BeaconNetwork) + obj) + return []byte(string(s.network.GetBeaconNetwork()) + obj) } // Name returns storage name. @@ -97,7 +99,7 @@ func (s *storage) Name() string { // Network returns the network storage is related to. func (s *storage) Network() core.Network { - return core.Network(s.network.BeaconNetwork) + return core.Network(s.network.GetBeaconNetwork()) } // SaveWallet stores the given wallet. @@ -406,3 +408,7 @@ func (s *storage) decrypt(data []byte) ([]byte, error) { nonce, ciphertext := data[:nonceSize], data[nonceSize:] return gcm.Open(nil, nonce, ciphertext, nil) } + +func (s *storage) BeaconNetwork() beacon.BeaconNetwork { + return s.network +} diff --git a/eth/ethtest/cluster_liquidated_test.go b/eth/ethtest/cluster_liquidated_test.go new file mode 100644 index 0000000000..46ae795cef --- /dev/null +++ b/eth/ethtest/cluster_liquidated_test.go @@ -0,0 +1,91 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testClusterLiquidatedInput struct { + *CommonTestInput + events []*ClusterLiquidatedEventInput +} + +func NewTestClusterLiquidatedInput(common *CommonTestInput) *testClusterLiquidatedInput { + return &testClusterLiquidatedInput{common, nil} +} + +func (input *testClusterLiquidatedInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type ClusterLiquidatedEventInput struct { + auth *bind.TransactOpts + ownerAddress *ethcommon.Address + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *ClusterLiquidatedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.ownerAddress == nil: + return fmt.Errorf("validation error: input.ownerAddress is empty") + case input.cluster == nil: + return fmt.Errorf("validation error: input.cluster is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +func (input *testClusterLiquidatedInput) prepare( + eventsToDo []*ClusterLiquidatedEventInput, +) { + input.events = eventsToDo +} + +func (input *testClusterLiquidatedInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.Liquidate( + event.auth, + *event.ownerAddress, + event.opsIds, + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/cluster_reactivated_test.go b/eth/ethtest/cluster_reactivated_test.go new file mode 100644 index 0000000000..664625f44b --- /dev/null +++ b/eth/ethtest/cluster_reactivated_test.go @@ -0,0 +1,87 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + 
"github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testClusterReactivatedInput struct { + *CommonTestInput + events []*ClusterReactivatedEventInput +} + +func NewTestClusterReactivatedInput(common *CommonTestInput) *testClusterReactivatedInput { + return &testClusterReactivatedInput{common, nil} +} + +func (input *testClusterReactivatedInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type ClusterReactivatedEventInput struct { + auth *bind.TransactOpts + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *ClusterReactivatedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.cluster == nil: + return fmt.Errorf("validation error: input.cluster is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +func (input *testClusterReactivatedInput) prepare( + eventsToDo []*ClusterReactivatedEventInput, +) { + input.events = eventsToDo +} + +func (input *testClusterReactivatedInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.Reactivate( + event.auth, + event.opsIds, + big.NewInt(100_000_000), + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/common_test.go b/eth/ethtest/common_test.go new file mode 100644 index 0000000000..44105dee65 --- /dev/null +++ b/eth/ethtest/common_test.go @@ -0,0 +1,231 @@ +package ethtest + +import ( + "context" + "fmt" + "math/big" + "net/http/httptest" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" + "github.com/golang/mock/gomock" + "go.uber.org/zap/zaptest" + + "github.com/bloxapp/ssv/eth/eventsyncer" + "github.com/bloxapp/ssv/eth/executionclient" + "github.com/bloxapp/ssv/eth/simulator" + "github.com/bloxapp/ssv/eth/simulator/simcontract" + "github.com/bloxapp/ssv/monitoring/metricsreporter" + "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator/mocks" +) + +type CommonTestInput struct { + t *testing.T + sim *simulator.SimulatedBackend + boundContract *simcontract.Simcontract + blockNum *uint64 + nodeStorage storage.Storage + doInOneBlock bool +} + +func NewCommonTestInput( + t *testing.T, + sim *simulator.SimulatedBackend, + boundContract *simcontract.Simcontract, + blockNum *uint64, + nodeStorage storage.Storage, + doInOneBlock bool, +) *CommonTestInput { + return &CommonTestInput{ + t: t, + sim: sim, + boundContract: boundContract, + blockNum: blockNum, + nodeStorage: nodeStorage, + doInOneBlock: doInOneBlock, + } +} + +type TestEnv struct { + eventSyncer *eventsyncer.EventSyncer + validators []*testValidatorData + ops []*testOperator + nodeStorage storage.Storage + sim 
*simulator.SimulatedBackend + boundContract *simcontract.Simcontract + auth *bind.TransactOpts + shares [][]byte + execClient *executionclient.ExecutionClient + rpcServer *rpc.Server + httpSrv *httptest.Server + validatorCtrl *mocks.MockController + mockCtrl *gomock.Controller + followDistance *uint64 +} + +func (e *TestEnv) shutdown() { + if e.mockCtrl != nil { + e.mockCtrl.Finish() + } + + if e.httpSrv != nil { + e.httpSrv.Close() + } + + if e.execClient != nil { + // Always returns nil error + _ = e.execClient.Close() + } +} + +func (e *TestEnv) setup( + t *testing.T, + ctx context.Context, + testAddresses []*ethcommon.Address, + validatorsCount uint64, + operatorsCount uint64, +) error { + if e.followDistance == nil { + e.SetDefaultFollowDistance() + } + logger := zaptest.NewLogger(t) + + // Create operators RSA keys + ops, err := createOperators(operatorsCount, 0) + if err != nil { + return err + } + + validators := make([]*testValidatorData, validatorsCount) + shares := make([][]byte, validatorsCount) + + // Create validators, BLS keys, shares + for i := 0; i < int(validatorsCount); i++ { + validators[i], err = createNewValidator(ops) + if err != nil { + return err + } + + shares[i], err = generateSharesData(validators[i], ops, testAddrAlice, i) + if err != nil { + return err + } + } + + eh, validatorCtrl, mockCtrl, nodeStorage, err := setupEventHandler(t, ctx, logger, ops[0], &testAddrAlice, true) + e.mockCtrl = mockCtrl + e.nodeStorage = nodeStorage + + if err != nil { + return err + } + if validatorCtrl == nil { + return fmt.Errorf("validatorCtrl is empty") + } + + // Adding testAddresses to the genesis block mostly to specify some balances for them + sim := simTestBackend(testAddresses) + + // Create JSON-RPC handler + rpcServer, err := sim.Node.RPCHandler() + e.rpcServer = rpcServer + if err != nil { + return fmt.Errorf("can't create RPC server: %w", err) + } + // Expose handler on a test server with ws open + httpSrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) + e.httpSrv = httpSrv + + addr := "ws:" + strings.TrimPrefix(httpSrv.URL, "http:") + + parsed, err := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) + if err != nil { + return fmt.Errorf("can't parse contract ABI: %w", err) + } + + auth, err := bind.NewKeyedTransactorWithChainID(testKeyAlice, big.NewInt(1337)) + if err != nil { + return err + } + + contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim) + if err != nil { + return fmt.Errorf("deploy contract: %w", err) + } + + sim.Commit() + + // Check contract code at the simulated blockchain + contractCode, err := sim.CodeAt(ctx, contractAddr, nil) + if err != nil { + return fmt.Errorf("get contract code: %w", err) + } + if len(contractCode) == 0 { + return fmt.Errorf("contractCode is empty") + } + + // Create a client and connect to the simulator + e.execClient, err = executionclient.New( + ctx, + addr, + contractAddr, + executionclient.WithLogger(logger), + executionclient.WithFollowDistance(*e.followDistance), + ) + if err != nil { + return err + } + + err = e.execClient.Healthy(ctx) + if err != nil { + return err + } + + e.boundContract, err = simcontract.NewSimcontract(contractAddr, sim) + if err != nil { + return err + } + + metricsReporter := metricsreporter.New( + metricsreporter.WithLogger(logger), + ) + + e.eventSyncer = eventsyncer.New( + nodeStorage, + e.execClient, + eh, + eventsyncer.WithLogger(logger), + eventsyncer.WithMetrics(metricsReporter), + ) + + 
e.validatorCtrl = validatorCtrl
+	e.sim = sim
+	e.auth = auth
+	e.validators = validators
+	e.ops = ops
+	e.shares = shares
+
+	return nil
+}
+
+// SetDefaultFollowDistance pins the test environment to the production follow
+// distance: the execution client only processes a block once the chain head is
+// followDistance blocks past it, which is why tests mine extra blocks
+// (CloseFollowDistance) before asserting on state.
+func (e *TestEnv) SetDefaultFollowDistance() {
+	// 8 is the current production offset
+	value := uint64(8)
+	e.followDistance = &value
+}
+
+func (e *TestEnv) CloseFollowDistance(blockNum *uint64) {
+	for i := uint64(0); i < *e.followDistance; i++ {
+		commitBlock(e.sim, blockNum)
+	}
+}
+
+func commitBlock(sim *simulator.SimulatedBackend, blockNum *uint64) {
+	sim.Commit()
+	*blockNum++
+}
diff --git a/eth/ethtest/eth_e2e_test.go b/eth/ethtest/eth_e2e_test.go
new file mode 100644
index 0000000000..b38dd8ea3d
--- /dev/null
+++ b/eth/ethtest/eth_e2e_test.go
@@ -0,0 +1,309 @@
+package ethtest
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"testing"
+	"time"
+
+	ethcommon "github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/bloxapp/ssv/eth/simulator/simcontract"
+	ssvtypes "github.com/bloxapp/ssv/protocol/v2/types"
+	registrystorage "github.com/bloxapp/ssv/registry/storage"
+)
+
+var (
+	testKeyAlice, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+	testKeyBob, _   = crypto.HexToECDSA("42e14d227125f411d6d3285bb4a2e07c2dba2e210bd2f3f4e2a36633bd61bfe6")
+
+	testAddrAlice = crypto.PubkeyToAddress(testKeyAlice.PublicKey)
+	testAddrBob   = crypto.PubkeyToAddress(testKeyBob.PublicKey)
+)
+
+// E2E tests for ETH package
+func TestEthExecLayer(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	testAddresses := make([]*ethcommon.Address, 2)
+	testAddresses[0] = &testAddrAlice
+	testAddresses[1] = &testAddrBob
+
+	cluster := &simcontract.CallableCluster{
+		ValidatorCount:  1,
+		NetworkFeeIndex: 1,
+		Index:           1,
+		Active:          true,
+		Balance:         big.NewInt(100_000_000),
+	}
+
+	expectedNonce := registrystorage.Nonce(0)
+
+	testEnv := TestEnv{}
+	testEnv.SetDefaultFollowDistance()
+
+	defer testEnv.shutdown()
+	err := testEnv.setup(t, ctx, testAddresses, 7, 4)
+	require.NoError(t, err)
+
+	var (
+		auth          = testEnv.auth
+		nodeStorage   = testEnv.nodeStorage
+		sim           = testEnv.sim
+		boundContract = testEnv.boundContract
+		ops           = testEnv.ops
+		validators    = testEnv.validators
+		eventSyncer   = testEnv.eventSyncer
+		shares        = testEnv.shares
+		validatorCtrl = testEnv.validatorCtrl
+	)
+
+	blockNum := uint64(0x1)
+	lastHandledBlockNum := uint64(0x1)
+
+	common := NewCommonTestInput(t, sim, boundContract, &blockNum, nodeStorage, true)
+	// Prepare blocks with events
+	// Check that the state is empty before the test
+	// Check SyncHistory doesn't execute any tasks -> doesn't run any of Controller methods
+	// Check the node storage for the existence of operators and a validator
+	t.Run("SyncHistory happy flow", func(t *testing.T) {
+		// BLOCK 2. produce OPERATOR ADDED
+		// Check that there are no registered operators
+		{
+			operators, err := nodeStorage.ListOperators(nil, 0, 10)
+			require.NoError(t, err)
+			require.Equal(t, 0, len(operators))
+
+			opAddedInput := NewOperatorAddedEventInput(common)
+			opAddedInput.prepare(ops, auth)
+			opAddedInput.produce()
+
+			testEnv.CloseFollowDistance(&blockNum)
+		}
+
+		// BLOCK 3: VALIDATOR ADDED:
+		// Check that there were no operations for Alice Validator
+		{
+			nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.Equal(t, expectedNonce, nonce)
+
+			valAddInput := NewTestValidatorRegisteredInput(common)
+			valAddInput.prepare(validators, shares, ops, auth, &expectedNonce, []uint32{0, 1})
+			valAddInput.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Run SyncHistory
+			lastHandledBlockNum, err = eventSyncer.SyncHistory(ctx, lastHandledBlockNum)
+			require.NoError(t, err)
+
+			// Check that all the events were handled correctly and the block number was increased
+			require.Equal(t, blockNum-*testEnv.followDistance, lastHandledBlockNum)
+			fmt.Println("lastHandledBlockNum", lastHandledBlockNum)
+
+			// Check that operators were successfully registered
+			operators, err := nodeStorage.ListOperators(nil, 0, 10)
+			require.NoError(t, err)
+			require.Equal(t, len(ops), len(operators))
+
+			// Check that the validator was registered
+			shares := nodeStorage.Shares().List(nil)
+			require.Equal(t, len(valAddInput.events), len(shares))
+
+			// Check the nonce was bumped
+			nonce, err = nodeStorage.GetNextNonce(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.Equal(t, expectedNonce, nonce)
+		}
+	})
+
+	// The main difference between handling "online" events and syncing historical (old) events
+	// is that here we have to check that the controller was triggered
+	t.Run("SyncOngoing happy flow", func(t *testing.T) {
+		go func() {
+			err = eventSyncer.SyncOngoing(ctx, lastHandledBlockNum+1)
+			require.NoError(t, err)
+		}()
+
+		stopChan := make(chan struct{})
+		go func() {
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case <-stopChan:
+					return
+				default:
+					time.Sleep(100 * time.Millisecond)
+				}
+			}
+		}()
+
+		// Step 1: Add more validators
+		{
+			validatorCtrl.EXPECT().StartValidator(gomock.Any()).AnyTimes()
+
+			// Check current nonce before start
+			nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.Equal(t, expectedNonce, nonce)
+
+			valAddInput := NewTestValidatorRegisteredInput(common)
+			valAddInput.prepare(validators, shares, ops, auth, &expectedNonce, []uint32{2, 3, 4, 5, 6})
+			valAddInput.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Wait until the state is changed
+			time.Sleep(time.Millisecond * 5000)
+
+			nonce, err = nodeStorage.GetNextNonce(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.Equal(t, expectedNonce, nonce)
+
+			// Not sure whether this check makes sense
+			require.Equal(t, uint64(testEnv.sim.Blockchain.CurrentBlock().Number.Int64()), *common.blockNum)
+		}
+
+		// Step 2: remove validator
+		{
+			validatorCtrl.EXPECT().StopValidator(gomock.Any()).AnyTimes()
+
+			shares := nodeStorage.Shares().List(nil)
+			require.Equal(t, 7, len(shares))
+
+			valRemove := NewTestValidatorRemovedEventsInput(common)
+			valRemove.prepare(
+				validators,
+				[]uint64{0, 1},
+				[]uint64{1, 2, 3, 4},
+				auth,
+				cluster,
+			)
+			valRemove.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Wait until the state is changed
+			time.Sleep(time.Millisecond * 500)
+
+			shares = nodeStorage.Shares().List(nil)
+			require.Equal(t, 5, len(shares))
+
+			for _,
event := range valRemove.events { + valPubKey := event.validator.masterPubKey.Serialize() + valShare := nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + } + } + + // Step 3 Liquidate Cluster + { + validatorCtrl.EXPECT().LiquidateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + clusterLiquidate := NewTestClusterLiquidatedInput(common) + clusterLiquidate.prepare([]*ClusterLiquidatedEventInput{ + { + auth: auth, + ownerAddress: &testAddrAlice, + opsIds: []uint64{1, 2, 3, 4}, + cluster: cluster, + }, + }) + clusterLiquidate.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + clusterID, err := ssvtypes.ComputeClusterIDHash(testAddrAlice.Bytes(), []uint64{1, 2, 3, 4}) + require.NoError(t, err) + + shares := nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.True(t, s.Liquidated) + } + } + + // Step 4 Reactivate Cluster + { + validatorCtrl.EXPECT().ReactivateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + clusterID, err := ssvtypes.ComputeClusterIDHash(testAddrAlice.Bytes(), []uint64{1, 2, 3, 4}) + require.NoError(t, err) + + shares := nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.True(t, s.Liquidated) + } + + // Trigger the event + clusterReactivated := NewTestClusterReactivatedInput(common) + clusterReactivated.prepare([]*ClusterReactivatedEventInput{ + { + auth: auth, + opsIds: []uint64{1, 2, 3, 4}, + cluster: cluster, + }, + }) + clusterReactivated.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + shares = nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.False(t, s.Liquidated) + } + } + + // Step 5 Remove some Operators + { + operators, err := nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, 4, len(operators)) + + opRemoved := NewOperatorRemovedEventInput(common) + opRemoved.prepare([]uint64{1, 2}, auth) + opRemoved.produce() + testEnv.CloseFollowDistance(&blockNum) + + // TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved + } + + // Step 6 Update Fee Recipient + { + validatorCtrl.EXPECT().UpdateFeeRecipient(gomock.Any(), gomock.Any()).Times(1) + + setFeeRecipient := NewSetFeeRecipientAddressInput(common) + setFeeRecipient.prepare([]*SetFeeRecipientAddressEventInput{ + {auth, &testAddrBob}, + }) + setFeeRecipient.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + recipientData, found, err := nodeStorage.GetRecipientData(nil, testAddrAlice) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, testAddrBob.String(), recipientData.FeeRecipient.String()) + } + + stopChan <- struct{}{} + }) +} diff --git a/eth/ethtest/operator_added_test.go b/eth/ethtest/operator_added_test.go new file mode 100644 index 0000000000..9a173a5064 --- /dev/null +++ b/eth/ethtest/operator_added_test.go @@ -0,0 +1,86 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + 
"github.com/bloxapp/ssv/eth/eventparser" +) + +type testOperatorAddedEventInput struct { + op *testOperator + auth *bind.TransactOpts +} + +type ProduceOperatorAddedEventsInput struct { + *CommonTestInput + events []*testOperatorAddedEventInput +} + +func NewOperatorAddedEventInput(common *CommonTestInput) *ProduceOperatorAddedEventsInput { + return &ProduceOperatorAddedEventsInput{common, nil} +} + +func (input *ProduceOperatorAddedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, event := range input.events { + err := event.validate() + if err != nil { + return err + } + } + return nil +} +func (input *testOperatorAddedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.op == nil: + return fmt.Errorf("validation error: input.op is empty") + } + + return nil +} + +func (input *ProduceOperatorAddedEventsInput) prepare( + ops []*testOperator, + auth *bind.TransactOpts, +) { + input.events = make([]*testOperatorAddedEventInput, len(ops)) + + for i, op := range ops { + input.events[i] = &testOperatorAddedEventInput{op, auth} + } +} + +func (input *ProduceOperatorAddedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + op := event.op + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) + require.NoError(input.t, err) + _, err = input.boundContract.SimcontractTransactor.RegisterOperator(event.auth, packedOperatorPubKey, big.NewInt(100_000_000)) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/operator_removed_test.go b/eth/ethtest/operator_removed_test.go new file mode 100644 index 0000000000..5b4dd27822 --- /dev/null +++ b/eth/ethtest/operator_removed_test.go @@ -0,0 +1,83 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" +) + +type testOperatorRemovedEventInput struct { + opId uint64 + auth *bind.TransactOpts +} + +type ProduceOperatorRemovedEventsInput struct { + *CommonTestInput + events []*testOperatorRemovedEventInput +} + +func NewOperatorRemovedEventInput(common *CommonTestInput) *ProduceOperatorRemovedEventsInput { + return &ProduceOperatorRemovedEventsInput{common, nil} +} + +func (input *ProduceOperatorRemovedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, event := range input.events { + err := event.validate() + if err != nil { + return err + } + } + return nil +} +func (input *testOperatorRemovedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.opId == 0: + return fmt.Errorf("validation error: input.opId is invalid") + } + + return nil +} + +func (input *ProduceOperatorRemovedEventsInput) prepare( + opsIds []uint64, + auth *bind.TransactOpts, +) { + 
input.events = make([]*testOperatorRemovedEventInput, len(opsIds)) + + for i, opId := range opsIds { + input.events[i] = &testOperatorRemovedEventInput{opId, auth} + } +} + +func (input *ProduceOperatorRemovedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + _, err = input.boundContract.SimcontractTransactor.RemoveOperator( + event.auth, + event.opId, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/set_fee_recipient_test.go b/eth/ethtest/set_fee_recipient_test.go new file mode 100644 index 0000000000..14ac7dd263 --- /dev/null +++ b/eth/ethtest/set_fee_recipient_test.go @@ -0,0 +1,80 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +type SetFeeRecipientAddressInput struct { + *CommonTestInput + events []*SetFeeRecipientAddressEventInput +} + +func NewSetFeeRecipientAddressInput(common *CommonTestInput) *SetFeeRecipientAddressInput { + return &SetFeeRecipientAddressInput{common, nil} +} + +func (input *SetFeeRecipientAddressInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type SetFeeRecipientAddressEventInput struct { + auth *bind.TransactOpts + address *ethcommon.Address +} + +func (input *SetFeeRecipientAddressEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.address == nil: + return fmt.Errorf("validation error: input.address is empty") + } + + return nil +} + +func (input *SetFeeRecipientAddressInput) prepare( + eventsToDo []*SetFeeRecipientAddressEventInput, +) { + input.events = eventsToDo +} + +func (input *SetFeeRecipientAddressInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.SetFeeRecipientAddress( + event.auth, + *event.address, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/utils_test.go b/eth/ethtest/utils_test.go new file mode 100644 index 0000000000..289030f7c8 --- /dev/null +++ b/eth/ethtest/utils_test.go @@ -0,0 +1,300 @@ +package ethtest + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "errors" + "fmt" + "math/big" + "testing" + + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/golang/mock/gomock" + "github.com/herumi/bls-eth-go-binary/bls" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/ekm" + "github.com/bloxapp/ssv/eth/contract" + "github.com/bloxapp/ssv/eth/eventhandler" + "github.com/bloxapp/ssv/eth/eventparser" + "github.com/bloxapp/ssv/eth/simulator" + ibftstorage "github.com/bloxapp/ssv/ibft/storage" + 
"github.com/bloxapp/ssv/networkconfig" + operatorstorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validator/mocks" + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + registrystorage "github.com/bloxapp/ssv/registry/storage" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils/blskeygen" + "github.com/bloxapp/ssv/utils/rsaencryption" + "github.com/bloxapp/ssv/utils/threshold" +) + +type testValidatorData struct { + masterKey *bls.SecretKey + masterPubKey *bls.PublicKey + masterPublicKeys bls.PublicKeys + operatorsShares []*testShare +} + +type testOperator struct { + id uint64 + rsaPub []byte + rsaPriv []byte +} + +type testShare struct { + opId uint64 + sec *bls.SecretKey + pub *bls.PublicKey +} + +func createNewValidator(ops []*testOperator) (*testValidatorData, error) { + validatorData := &testValidatorData{} + sharesCount := uint64(len(ops)) + threshold.Init() + + msk, mpk := blskeygen.GenBLSKeyPair() + secVec := msk.GetMasterSecretKey(int(sharesCount)) + pubKeys := bls.GetMasterPublicKey(secVec) + splitKeys, err := threshold.Create(msk.Serialize(), sharesCount-1, sharesCount) + if err != nil { + return nil, err + } + + validatorData.operatorsShares = make([]*testShare, sharesCount) + + // derive a `sharesCount` number of shares + for i := uint64(1); i <= sharesCount; i++ { + validatorData.operatorsShares[i-1] = &testShare{ + opId: i, + sec: splitKeys[i], + pub: splitKeys[i].GetPublicKey(), + } + } + + validatorData.masterKey = msk + validatorData.masterPubKey = mpk + validatorData.masterPublicKeys = pubKeys + + return validatorData, nil +} + +func createOperators(num uint64, idOffset uint64) ([]*testOperator, error) { + testOps := make([]*testOperator, num) + + for i := uint64(1); i <= num; i++ { + pb, sk, err := rsaencryption.GenerateKeys() + if err != nil { + return nil, err + } + testOps[i-1] = &testOperator{ + id: idOffset + i, + rsaPub: pb, + rsaPriv: sk, + } + } + + return testOps, nil +} + +func generateSharesData(validatorData *testValidatorData, operators []*testOperator, owner ethcommon.Address, nonce int) ([]byte, error) { + var pubKeys []byte + var encryptedShares []byte + + for i, op := range operators { + rsaKey, err := rsaencryption.ConvertPemToPublicKey(op.rsaPub) + if err != nil { + return nil, fmt.Errorf("can't convert public key: %w", err) + } + + rawShare := validatorData.operatorsShares[i].sec.SerializeToHexStr() + cipherText, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(rawShare)) + if err != nil { + return nil, fmt.Errorf("can't encrypt share: %w", err) + } + + rsaPriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.rsaPriv)) + if err != nil { + return nil, fmt.Errorf("can't convert secret key to a private key share: %w", err) + } + + // check that we encrypt right + shareSecret := &bls.SecretKey{} + decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsaPriv, cipherText) + if err != nil { + return nil, err + } + if err = shareSecret.SetHexString(string(decryptedSharePrivateKey)); err != nil { + return nil, err + } + + pubKeys = append(pubKeys, validatorData.operatorsShares[i].pub.Serialize()...) + encryptedShares = append(encryptedShares, cipherText...) 
+ + } + + toSign := fmt.Sprintf("%s:%d", owner.String(), nonce) + msgHash := crypto.Keccak256([]byte(toSign)) + signed := validatorData.masterKey.Sign(string(msgHash)) + sig := signed.Serialize() + + if !signed.VerifyByte(validatorData.masterPubKey, msgHash) { + return nil, errors.New("can't sign correctly") + } + + sharesData := append(pubKeys, encryptedShares...) + sharesDataSigned := append(sig, sharesData...) + + return sharesDataSigned, nil +} + +func setupEventHandler( + t *testing.T, + ctx context.Context, + logger *zap.Logger, + operator *testOperator, + ownerAddress *ethcommon.Address, + useMockCtrl bool, +) (*eventhandler.EventHandler, *mocks.MockController, *gomock.Controller, operatorstorage.Storage, error) { + db, err := kv.NewInMemory(logger, basedb.Options{ + Ctx: ctx, + }) + if err != nil { + return nil, nil, nil, nil, err + } + + storageMap := ibftstorage.NewStores() + nodeStorage, operatorData := setupOperatorStorage(logger, db, operator, ownerAddress) + testNetworkConfig := networkconfig.TestNetwork + + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, testNetworkConfig, true, "") + if err != nil { + return nil, nil, nil, nil, err + } + + ctrl := gomock.NewController(t) + bc := beacon.NewMockBeaconNode(ctrl) + + contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) + if err != nil { + return nil, nil, nil, nil, err + } + + if useMockCtrl { + validatorCtrl := mocks.NewMockController(ctrl) + + parser := eventparser.New(contractFilterer) + + eh, err := eventhandler.New( + nodeStorage, + parser, + validatorCtrl, + testNetworkConfig.Domain, + validatorCtrl, + nodeStorage.GetPrivateKey, + keyManager, + bc, + storageMap, + eventhandler.WithFullNode(), + eventhandler.WithLogger(logger), + ) + + if err != nil { + return nil, nil, nil, nil, err + } + + validatorCtrl.EXPECT().GetOperatorData().Return(operatorData).AnyTimes() + + return eh, validatorCtrl, ctrl, nodeStorage, nil + } + + validatorCtrl := validator.NewController(logger, validator.ControllerOptions{ + Context: ctx, + DB: db, + RegistryStorage: nodeStorage, + KeyManager: keyManager, + StorageMap: storageMap, + OperatorData: operatorData, + }) + + parser := eventparser.New(contractFilterer) + + eh, err := eventhandler.New( + nodeStorage, + parser, + validatorCtrl, + testNetworkConfig.Domain, + validatorCtrl, + nodeStorage.GetPrivateKey, + keyManager, + bc, + storageMap, + eventhandler.WithFullNode(), + eventhandler.WithLogger(logger), + ) + if err != nil { + return nil, nil, nil, nil, err + } + + return eh, nil, ctrl, nodeStorage, nil +} + +func setupOperatorStorage( + logger *zap.Logger, + db basedb.Database, + operator *testOperator, + ownerAddress *ethcommon.Address, +) (operatorstorage.Storage, *registrystorage.OperatorData) { + if operator == nil { + logger.Fatal("empty test operator was passed") + } + + nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) + if err != nil { + logger.Fatal("failed to create node storage", zap.Error(err)) + } + + operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.rsaPriv)) + if err != nil { + logger.Fatal("couldn't setup operator private key", zap.Error(err)) + } + + _, found, err := nodeStorage.GetPrivateKey() + if err != nil || !found { + logger.Fatal("failed to get operator private key", zap.Error(err)) + } + var operatorData *registrystorage.OperatorData + operatorData, found, err = nodeStorage.GetOperatorDataByPubKey(nil, operatorPubKey) + + if err != nil { + logger.Fatal("couldn't get operator 
data by public key", zap.Error(err)) + } + if !found { + operatorData = ®istrystorage.OperatorData{ + PublicKey: operatorPubKey, + ID: operator.id, + OwnerAddress: *ownerAddress, + } + } + + return nodeStorage, operatorData +} + +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { + genesis := core.GenesisAlloc{} + + for _, testAddr := range testAddresses { + genesis[*testAddr] = core.GenesisAccount{Balance: big.NewInt(10000000000000000)} + } + + return simulator.NewSimulatedBackend( + genesis, 50_000_000, + ) +} diff --git a/eth/ethtest/validator_added_test.go b/eth/ethtest/validator_added_test.go new file mode 100644 index 0000000000..2497552e7f --- /dev/null +++ b/eth/ethtest/validator_added_test.go @@ -0,0 +1,134 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" + registrystorage "github.com/bloxapp/ssv/registry/storage" +) + +type testValidatorRegisteredInput struct { + *CommonTestInput + events []*validatorRegisteredEventInput +} + +func NewTestValidatorRegisteredInput(common *CommonTestInput) *testValidatorRegisteredInput { + return &testValidatorRegisteredInput{common, nil} +} + +func (input *testValidatorRegisteredInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type validatorRegisteredEventInput struct { + auth *bind.TransactOpts + ops []*testOperator + validator *testValidatorData + share []byte + opsIds []uint64 // separating opsIds from ops as it is a separate event field and should be used for destructive tests +} + +func (input *validatorRegisteredEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.validator == nil: + return fmt.Errorf("validation error: input.validator is empty") + case len(input.share) == 0: + return fmt.Errorf("validation error: input.share is empty") + case len(input.ops) == 0: + return fmt.Errorf("validation error: input.ops is empty") + } + + if len(input.opsIds) == 0 { + input.opsIds = make([]uint64, len(input.ops)) + for i, op := range input.ops { + input.opsIds[i] = op.id + } + } + + return nil +} + +func (input *testValidatorRegisteredInput) prepare( + validators []*testValidatorData, + shares [][]byte, + ops []*testOperator, + auth *bind.TransactOpts, + expectedNonce *registrystorage.Nonce, + validatorsIds []uint32, +) { + input.events = make([]*validatorRegisteredEventInput, len(validatorsIds)) + + for i, validatorId := range validatorsIds { + // Check there are no shares in the state for the current validator + valPubKey := validators[validatorId].masterPubKey.Serialize() + share := input.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(input.t, share) + + // Create event input + input.events[i] = &validatorRegisteredEventInput{ + validator: validators[validatorId], + share: shares[validatorId], + auth: auth, + ops: ops, + } + + // expect nonce bumping after each of these ValidatorAdded events handling + *expectedNonce++ + } +} + +func (input *testValidatorRegisteredInput) produce() { + err := input.validate() + 
require.NoError(input.t, err) + + for _, event := range input.events { + val := event.validator + valPubKey := val.masterPubKey.Serialize() + shares := input.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(input.t, shares) + + // Call the contract method + _, err := input.boundContract.SimcontractTransactor.RegisterValidator( + event.auth, + val.masterPubKey.Serialize(), + event.opsIds, + event.share, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/validator_removed_test.go b/eth/ethtest/validator_removed_test.go new file mode 100644 index 0000000000..778b67dff8 --- /dev/null +++ b/eth/ethtest/validator_removed_test.go @@ -0,0 +1,104 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testValidatorRemovedInput struct { + auth *bind.TransactOpts + validator *testValidatorData + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *testValidatorRemovedInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.validator == nil: + return fmt.Errorf("validation error: input.validator is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +type TestValidatorRemovedEventsInput struct { + *CommonTestInput + events []*testValidatorRemovedInput +} + +func (input *TestValidatorRemovedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: empty CommonTestInput") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +func NewTestValidatorRemovedEventsInput(common *CommonTestInput) *TestValidatorRemovedEventsInput { + return &TestValidatorRemovedEventsInput{common, nil} +} + +func (input *TestValidatorRemovedEventsInput) prepare( + validators []*testValidatorData, + validatorsIds []uint64, + opsIds []uint64, + auth *bind.TransactOpts, + cluster *simcontract.CallableCluster, +) { + input.events = make([]*testValidatorRemovedInput, len(validatorsIds)) + + for i, validatorId := range validatorsIds { + input.events[i] = &testValidatorRemovedInput{ + auth, + validators[validatorId], + opsIds, + cluster, + } + } +} + +func (input *TestValidatorRemovedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + valPubKey := event.validator.masterPubKey.Serialize() + // Check the validator's shares are present in the state before removing + valShare := input.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(input.t, valShare) + + _, err = input.boundContract.SimcontractTransactor.RemoveValidator( + event.auth, + valPubKey, + event.opsIds, + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, 
input.blockNum) + } +} diff --git a/eth/eventhandler/event_handler.go b/eth/eventhandler/event_handler.go index 1c909caf88..b207c78a25 100644 --- a/eth/eventhandler/event_handler.go +++ b/eth/eventhandler/event_handler.go @@ -46,7 +46,7 @@ var ( type taskExecutor interface { StartValidator(share *ssvtypes.SSVShare) error - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient ethcommon.Address) error @@ -285,7 +285,7 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, return nil, nil } - sharePK, err := eh.handleValidatorRemoved(txn, validatorRemovedEvent) + validatorPubKey, err := eh.handleValidatorRemoved(txn, validatorRemovedEvent) if err != nil { eh.metrics.EventProcessingFailed(abiEvent.Name) @@ -298,13 +298,11 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, defer eh.metrics.EventProcessed(abiEvent.Name) - if sharePK == nil { - return nil, nil + if validatorPubKey != nil { + return NewStopValidatorTask(eh.taskExecutor, validatorPubKey), nil } - task := NewStopValidatorTask(eh.taskExecutor, validatorRemovedEvent.PublicKey) - - return task, nil + return nil, nil case ClusterLiquidated: clusterLiquidatedEvent, err := eh.eventParser.ParseClusterLiquidated(event) diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index bf1f96961e..070de44d04 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -1,6 +1,7 @@ package eventhandler import ( + "bytes" "context" "crypto/rand" "crypto/rsa" @@ -12,13 +13,8 @@ import ( "strings" "testing" - "github.com/bloxapp/ssv/operator/validator" - "github.com/bloxapp/ssv/operator/validator/mocks" - "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/bloxapp/ssv/utils/blskeygen" - "github.com/pkg/errors" - + ekmcore "github.com/bloxapp/eth2-key-manager/core" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" ethcommon "github.com/ethereum/go-ethereum/common" @@ -27,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/golang/mock/gomock" "github.com/herumi/bls-eth-go-binary/bls" + "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -40,10 +37,15 @@ import ( ibftstorage "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/networkconfig" operatorstorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validator/mocks" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" registrystorage "github.com/bloxapp/ssv/registry/storage" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils" + "github.com/bloxapp/ssv/utils/blskeygen" "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/utils/threshold" ) @@ -60,15 +62,34 @@ func TestHandleBlockEventsStream(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + operatorsCount := uint64(0) // Create operators rsa keys - ops, err := createOperators(4) + ops, err := createOperators(4, operatorsCount) require.NoError(t, err) + 
operatorsCount += uint64(len(ops))
 
-	eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false)
+	currentSlot := &utils.SlotValue{}
+	mockBeaconNetwork := utils.SetupMockBeaconNetwork(t, currentSlot)
+	mockNetworkConfig := &networkconfig.NetworkConfig{
+		Beacon: mockBeaconNetwork,
+	}
+
+	eh, _, err := setupEventHandler(t, ctx, logger, mockNetworkConfig, ops[0], false)
 	if err != nil {
 		t.Fatal(err)
 	}
-	sim := simTestBackend(testAddr)
+
+	// Create one more key/address pair for testing
+	wrongPk, err := crypto.HexToECDSA("42e14d227125f411d6d3285bb4a2e07c2dba2e210bd2f3f4e2a36633bd61bfe6")
+	require.NoError(t, err)
+	testAddr2 := crypto.PubkeyToAddress(wrongPk.PublicKey)
+
+	testAddresses := make([]*ethcommon.Address, 2)
+	testAddresses[0] = &testAddr
+	testAddresses[1] = &testAddr2
+
+	// Add testAddresses to the genesis block, mainly to fund them with balances
+	sim := simTestBackend(testAddresses)
 
 	// Create JSON-RPC handler
 	rpcServer, _ := sim.Node.RPCHandler()
@@ -114,13 +135,23 @@ func TestHandleBlockEventsStream(t *testing.T) {
 	sharesData1, err := generateSharesData(validatorData1, ops, testAddr, 0)
 	require.NoError(t, err)
 
+	// Create another validator. We'll create the shares later in the tests
+	validatorData2, err := createNewValidator(ops)
+	require.NoError(t, err)
+
+	validatorData3, err := createNewValidator(ops)
+	require.NoError(t, err)
+	sharesData3, err := generateSharesData(validatorData3, ops, testAddr, 3)
+	require.NoError(t, err)
+
 	blockNum := uint64(0x1)
+	currentSlot.SetSlot(100)
 
 	t.Run("test OperatorAdded event handle", func(t *testing.T) {
 		for _, op := range ops {
 			// Call the contract method
-			packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.pub)
+			packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub)
 			require.NoError(t, err)
 			_, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000))
 			require.NoError(t, err)
@@ -139,22 +170,22 @@ func TestHandleBlockEventsStream(t *testing.T) {
 		}()
 
 		// Check that there is no registered operators
-		operators, err := eh.nodeStorage.ListOperators(nil, 0, 10)
+		operators, err := eh.nodeStorage.ListOperators(nil, 0, 0)
 		require.NoError(t, err)
 		require.Equal(t, 0, len(operators))
 
-		// Hanlde the event
+		// Handle the event
 		lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
 		require.Equal(t, blockNum+1, lastProcessedBlock)
 		require.NoError(t, err)
 		blockNum++
 
-		// Check storage for a new operator
-		operators, err = eh.nodeStorage.ListOperators(nil, 0, 10)
+		// Check storage for the new operators
+		operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
 		require.NoError(t, err)
 		require.Equal(t, len(ops), len(operators))
 
-		// Check if an operator in the storage has same attributes
+		// Check if operators in the storage have same attributes
 		for i, log := range block.Logs {
 			operatorAddedEvent, err := contractFilterer.ParseOperatorAdded(log)
 			require.NoError(t, err)
@@ -162,47 +193,124 @@ func TestHandleBlockEventsStream(t *testing.T) {
 			require.NoError(t, err)
 			require.Equal(t, operatorAddedEvent.OperatorId, data.ID)
 			require.Equal(t, operatorAddedEvent.Owner, data.OwnerAddress)
-			require.Equal(t, ops[i].pub, data.PublicKey)
+			require.Equal(t, ops[i].rsaPub, data.PublicKey)
 		}
 	})
 
-	// Receive event, unmarshall, parse, check parse event is not nil or with error, operator id is correct
 	t.Run("test OperatorRemoved event handle", func(t *testing.T) {
-		// Call the contract method
-		_, err = 
boundContract.SimcontractTransactor.RemoveOperator(auth, 1)
-		require.NoError(t, err)
-		sim.Commit()
-		block := <-logs
-		require.NotEmpty(t, block.Logs)
-		require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0])
+		// Should return MalformedEventError and no changes to the state
+		t.Run("test OperatorRemoved incorrect operator ID", func(t *testing.T) {
+			// Call the contract method
+			_, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 100500)
+			require.NoError(t, err)
+			sim.Commit()
 
-		eventsCh := make(chan executionclient.BlockLogs)
-		go func() {
-			defer close(eventsCh)
-			eventsCh <- block
-		}()
+			block := <-logs
+			require.NotEmpty(t, block.Logs)
+			require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0])
 
-		// Check that there is 1 registered operator
-		operators, err := eh.nodeStorage.ListOperators(nil, 0, 10)
-		require.NoError(t, err)
-		require.Equal(t, len(ops), len(operators))
+			eventsCh := make(chan executionclient.BlockLogs)
+			go func() {
+				defer close(eventsCh)
+				eventsCh <- block
+			}()
 
-		// Hanlde the event
-		lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
-		require.Equal(t, blockNum+1, lastProcessedBlock)
-		require.NoError(t, err)
-		blockNum++
+			// Check the current number of registered operators
+			operators, err := eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops), len(operators))
 
-		// Check if the operator was removed successfuly
-		// TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved
-		operators, err = eh.nodeStorage.ListOperators(nil, 0, 10)
-		require.NoError(t, err)
-		require.Equal(t, len(ops), len(operators))
+			// Handle the event
+			lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
+			require.Equal(t, blockNum+1, lastProcessedBlock)
+			require.NoError(t, err)
+			blockNum++
+
+			// Check that no operator was removed
+			operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops), len(operators))
+		})
+
+		// Receive event, unmarshal, parse, check parse event is not nil or with error, operator id is correct
+		// TODO: fix this test. 
It currently checks nothing, because the handleOperatorRemoved method is a no-op
+		t.Run("test OperatorRemoved happy flow", func(t *testing.T) {
+			// Prepare a new operator to remove it later in this test
+			op, err := createOperators(1, operatorsCount)
+			require.NoError(t, err)
+			operatorsCount++
+
+			// Call the contract method
+			packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op[0].rsaPub)
+			require.NoError(t, err)
+			_, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000))
+			require.NoError(t, err)
+
+			sim.Commit()
+
+			block := <-logs
+			require.NotEmpty(t, block.Logs)
+			require.Equal(t, ethcommon.HexToHash("0xd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f4"), block.Logs[0].Topics[0])
+
+			eventsCh := make(chan executionclient.BlockLogs)
+			go func() {
+				defer close(eventsCh)
+				eventsCh <- block
+			}()
+
+			// Check that the new operator is not registered yet
+			operators, err := eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops), len(operators))
+
+			// Handle OperatorAdded event
+			lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
+			require.Equal(t, blockNum+1, lastProcessedBlock)
+			require.NoError(t, err)
+			blockNum++
+			// Check storage for the new operator
+			operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops)+1, len(operators))
+
+			// Now start the OperatorRemoved event handling
+			// Call the contract method
+			_, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 4)
+			require.NoError(t, err)
+			sim.Commit()
+
+			block = <-logs
+			require.NotEmpty(t, block.Logs)
+			require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0])
+
+			eventsCh = make(chan executionclient.BlockLogs)
+			go func() {
+				defer close(eventsCh)
+				eventsCh <- block
+			}()
+
+			operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops)+1, len(operators))
+
+			// Handle OperatorRemoved event
+			lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false)
+			require.Equal(t, blockNum+1, lastProcessedBlock)
+			require.NoError(t, err)
+			blockNum++
+
+			// TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved
+			// Check if the operator was removed successfully
+			//operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
+			//require.NoError(t, err)
+			//require.Equal(t, len(ops), len(operators))
+		})
 	})
 
 	// Receive event, unmarshall, parse, check parse event is not nil or with an error,
 	// public key is correct, owner is correct, operator ids are correct, shares are correct
+	// slashing protection data is correct
 	t.Run("test ValidatorAdded event handle", func(t *testing.T) {
 		nonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr)
 		require.NoError(t, err)
@@ -236,9 +344,12 @@ func TestHandleBlockEventsStream(t *testing.T) {
 		}()
 
 		lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
-		require.Equal(t, blockNum+1, lastProcessedBlock)
 		require.NoError(t, err)
+		require.Equal(t, blockNum+1, lastProcessedBlock)
 		blockNum++
+
+		requireKeyManagerDataToExist(t, eh, 1, validatorData1)
+
 		// Check that validator was registered
 		shares := eh.nodeStorage.Shares().List(nil)
 		require.Equal(t, 1, len(shares))
@@ -247,12 +358,11 @@ func TestHandleBlockEventsStream(t *testing.T) {
 		require.NoError(t, err)
 		require.Equal(t, registrystorage.Nonce(1), nonce)
 
-		validatorData2, err := 
createNewValidator(ops) - require.NoError(t, err) sharesData2, err := generateSharesData(validatorData2, ops, testAddr, 2) require.NoError(t, err) // SharesData length is incorrect. Nonce is bumped; Validator wasn't added + // slashing protection data is not added t.Run("test nonce bumping even for incorrect sharesData length", func(t *testing.T) { // changing the length malformedSharesData := sharesData2[:len(sharesData2)-1] @@ -285,10 +395,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToNotExist(t, eh, 1, validatorData2) + // Check that validator was not registered, shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 1, len(shares)) @@ -299,6 +411,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Length of the shares []byte is correct; nonce is bumped; validator is added + // slashing protection data is correct t.Run("test validator 1 doesnt check validator's 4 share", func(t *testing.T) { malformedSharesData := sharesData2[:] // Corrupt the encrypted last share key of the 4th operator @@ -332,10 +445,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToExist(t, eh, 2, validatorData2) + // Check that validator was registered for op1, shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 2, len(shares)) @@ -345,13 +460,9 @@ func TestHandleBlockEventsStream(t *testing.T) { require.Equal(t, registrystorage.Nonce(3), nonce) }) - validatorData3, err := createNewValidator(ops) - require.NoError(t, err) - sharesData3, err := generateSharesData(validatorData3, ops, testAddr, 3) - require.NoError(t, err) - // Share for 1st operator is malformed; check nonce is bumped correctly; validator wasn't added - t.Run("test correct ValidatorAdded again and nonce is bumped", func(t *testing.T) { + // slashing protection data is not added + t.Run("test malformed ValidatorAdded and nonce is bumped", func(t *testing.T) { malformedSharesData := sharesData3[:] operatorCount := len(ops) @@ -389,10 +500,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToNotExist(t, eh, 2, validatorData3) + // Check that validator was not registered shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 2, len(shares)) @@ -403,6 +516,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Correct event; check nonce is bumped correctly; validator is added + // slashing protection data is correct t.Run("test correct ValidatorAdded again and nonce is bumped", func(t *testing.T) { // regenerate with updated nonce sharesData3, err = generateSharesData(validatorData3, ops, testAddr, 4) @@ -435,10 +549,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToExist(t, 
eh, 3, validatorData3) + // Check that validator was registered shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 3, len(shares)) @@ -447,14 +563,192 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) require.Equal(t, registrystorage.Nonce(5), nonce) }) + + t.Run("test correct ValidatorAdded again and nonce is bumped with another owner", func(t *testing.T) { + validatorData4, err := createNewValidator(ops) + require.NoError(t, err) + authTestAddr2, _ := bind.NewKeyedTransactorWithChainID(wrongPk, big.NewInt(1337)) + + sharesData4, err := generateSharesData(validatorData4, ops, testAddr2, 0) + require.NoError(t, err) + // Call the contract method + _, err = boundContract.SimcontractTransactor.RegisterValidator( + authTestAddr2, + validatorData4.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + sharesData4, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block = <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e5"), block.Logs[0].Topics[0]) + + eventsCh = make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) + require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) + blockNum++ + + requireKeyManagerDataToExist(t, eh, 4, validatorData4) + + // Check that validator was registered + shares = eh.nodeStorage.Shares().List(nil) + require.Equal(t, 4, len(shares)) + // and nonce was bumped + nonce, err = eh.nodeStorage.GetNextNonce(nil, testAddr2) + require.NoError(t, err) + // Check that nonces are not intertwined between different owner accounts! 
+ require.Equal(t, registrystorage.Nonce(1), nonce) + }) + }) - // Receive event, unmarshall, parse, check parse event is not nil or with an error, - // public key is correct, owner is correct, operator ids are correct - t.Run("test ValidatorRemoved event handle", func(t *testing.T) { - _, err = boundContract.SimcontractTransactor.RemoveValidator( + t.Run("test ValidatorRemoved event handling", func(t *testing.T) { + // Must throw error "malformed event: could not find validator share" + t.Run("ValidatorRemoved incorrect event public key", func(t *testing.T) { + pk := validatorData1.masterPubKey.Serialize() + // Corrupt the public key + pk[len(pk)-1] ^= 1 + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + pk, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator's shares are still present in the state after incorrect ValidatorRemoved event + valShare := eh.nodeStorage.Shares().Get(nil, validatorData1.masterPubKey.Serialize()) + require.NotNil(t, valShare) + }) + + t.Run("ValidatorRemoved incorrect owner address", func(t *testing.T) { + wrongAuth, _ := bind.NewKeyedTransactorWithChainID(wrongPk, big.NewInt(1337)) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + wrongAuth, + validatorData1.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator's shares are still present in the state after incorrect ValidatorRemoved event + valShare := eh.nodeStorage.Shares().Get(nil, validatorData1.masterPubKey.Serialize()) + require.NotNil(t, valShare) + }) + + // Receive event, unmarshall, parse, check parse event is not nil or with an error, + // public key is correct, owner is correct, operator ids are correct + // event handler's own operator is responsible for removed validator + t.Run("ValidatorRemoved happy flow", func(t *testing.T) { + valPubKey := validatorData1.masterPubKey.Serialize() + // Check the validator's shares are present in the state before removing + valShare := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, valShare) + requireKeyManagerDataToExist(t, eh, 4, validatorData1) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + validatorData1.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + 
simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator was removed from the validator shares storage. + shares := eh.nodeStorage.Shares().List(nil) + require.Equal(t, 3, len(shares)) + valShare = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + requireKeyManagerDataToNotExist(t, eh, 3, validatorData1) + }) + }) + + // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct + // slashing protection data is not deleted + t.Run("test ClusterLiquidated event handle", func(t *testing.T) { + _, err = boundContract.SimcontractTransactor.Liquidate( auth, - validatorData1.masterPubKey.Serialize(), + testAddr, []uint64{1, 2, 3, 4}, simcontract.CallableCluster{ ValidatorCount: 1, @@ -468,7 +762,7 @@ func TestHandleBlockEventsStream(t *testing.T) { block := <-logs require.NotEmpty(t, block.Logs) - require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0x1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e688"), block.Logs[0].Topics[0]) eventsCh := make(chan executionclient.BlockLogs) go func() { @@ -476,14 +770,91 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests. 
This one has to be in the state + valPubKey := validatorData2.masterPubKey.Serialize() + + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.True(t, share.Liquidated) + // check that slashing data was not deleted + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + + require.Equal(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Equal(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, highestProposal, currentSlot.GetSlot()) }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct - t.Run("test ClusterLiquidated event handle", func(t *testing.T) { + // ** storedEpoch = max(nextEpoch, storedEpoch) ** + // Validate that slashing protection data stored epoch is nextEpoch and NOT storedEpoch + t.Run("test ClusterReactivated event handle", func(t *testing.T) { + _, err = boundContract.SimcontractTransactor.Reactivate( + auth, + []uint64{1, 2, 3, 4}, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b6859"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + currentSlot.SetSlot(1000) + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + + // check that slashing data was bumped + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + require.Equal(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Equal(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, highestProposal, currentSlot.GetSlot()) + + blockNum++ + }) + + // Liquidated event is far in the future + // in order to simulate stored far in the future slashing protection data + t.Run("test ClusterLiquidated event handle - far in the future", func(t *testing.T) { _, err = boundContract.SimcontractTransactor.Liquidate( auth, testAddr, @@ 
-514,11 +885,13 @@ func TestHandleBlockEventsStream(t *testing.T) {
 		blockNum++
 	})
 
-	// Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct
-	t.Run("test ClusterReactivated event handle", func(t *testing.T) {
+	// Reactivate event
+	// ** storedEpoch = max(nextEpoch, storedEpoch) **
+	// Validate that slashing protection data stored epoch is storedEpoch and NOT nextEpoch
+	t.Run("test ClusterReactivated event handle - far in the future", func(t *testing.T) {
 		_, err = boundContract.SimcontractTransactor.Reactivate(
 			auth,
-			[]uint64{1, 2, 3},
+			[]uint64{1, 2, 3, 4},
 			big.NewInt(100_000_000),
 			simcontract.CallableCluster{
 				ValidatorCount: 1,
@@ -540,17 +913,44 @@ func TestHandleBlockEventsStream(t *testing.T) {
 			eventsCh <- block
 		}()
 
+		// Using validator 2 because we've removed validator 1 in ValidatorRemoved tests
+		valPubKey := validatorData2.masterPubKey.Serialize()
+
+		share := eh.nodeStorage.Shares().Get(nil, valPubKey)
+		require.NotNil(t, share)
+		require.True(t, share.Liquidated)
+
+		currentSlot.SetSlot(100)
+
 		lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
 		require.Equal(t, blockNum+1, lastProcessedBlock)
 		require.NoError(t, err)
+
+		// check that slashing protection data is ahead of the current epoch
+		sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize()
+		highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey)
+		require.NoError(t, err)
+		require.True(t, found)
+		require.NotNil(t, highestAttestation)
+		require.Greater(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1)
+		require.Greater(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot()))
+
+		highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey)
+		require.NoError(t, err)
+		require.True(t, found)
+		require.Greater(t, highestProposal, currentSlot.GetSlot())
+
 		blockNum++
+
+		share = eh.nodeStorage.Shares().Get(nil, valPubKey)
+		require.NotNil(t, share)
+		require.False(t, share.Liquidated)
 	})
 
 	// Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, fee recipient is correct
 	t.Run("test FeeRecipientAddressUpdated event handle", func(t *testing.T) {
 		_, err = boundContract.SimcontractTransactor.SetFeeRecipientAddress(
 			auth,
-			ethcommon.HexToAddress("0x1"),
+			testAddr2,
 		)
 		require.NoError(t, err)
 		sim.Commit()
@@ -569,14 +969,202 @@ func TestHandleBlockEventsStream(t *testing.T) {
 		require.Equal(t, blockNum+1, lastProcessedBlock)
 		require.NoError(t, err)
 		blockNum++
-		// Check if the fee recepient was updated
-		recepientData, _, err := eh.nodeStorage.GetRecipientData(nil, testAddr)
+		// Check if the fee recipient was updated
+		recipientData, _, err := eh.nodeStorage.GetRecipientData(nil, testAddr)
 		require.NoError(t, err)
-		require.Equal(t, ethcommon.HexToAddress("0x1").String(), recepientData.FeeRecipient.String())
+		require.Equal(t, testAddr2.String(), recipientData.FeeRecipient.String())
+	})
+
+	// DO / UNDO in one block tests
+	t.Run("test DO / UNDO in one block", func(t *testing.T) {
+		t.Run("test OperatorAdded + OperatorRemoved events handling", func(t *testing.T) {
+			// There are 5 operators registered before this test runs
+			// Check the current number of registered operators
+			operators, err := eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, operatorsCount, uint64(len(operators)))
+
+ tmpOps, err := createOperators(1, operatorsCount) + require.NoError(t, err) + operatorsCount++ + op := tmpOps[0] + + // Call the RegisterOperator contract method + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) + require.NoError(t, err) + _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000)) + require.NoError(t, err) + + // Call the OperatorRemoved contract method + _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, op.id) + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f4"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + // Handle the event + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // #TODO: Fails until we fix the OperatorAdded: handlers.go #108 + // Check storage for the new operators + //operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) + //require.NoError(t, err) + //require.Equal(t, operatorsCount-1, uint64(len(operators))) + // + //_, found, err := eh.nodeStorage.GetOperatorData(nil, op.id) + //require.NoError(t, err) + //require.False(t, found) + }) + + t.Run("test ValidatorAdded + ValidatorRemoved events handling", func(t *testing.T) { + shares := eh.nodeStorage.Shares().List(nil) + sharesCountBeforeTest := len(shares) + + validatorData4, err := createNewValidator(ops) + require.NoError(t, err) + + currentNonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) + require.NoError(t, err) + + sharesData4, err := generateSharesData(validatorData4, ops, testAddr, int(currentNonce)) + require.NoError(t, err) + + valPubKey := validatorData4.masterPubKey.Serialize() + valShare := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + + // Call the contract method + _, err = boundContract.SimcontractTransactor.RegisterValidator( + auth, + validatorData4.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + sharesData4, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + valPubKey, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e5"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + valShare = eh.nodeStorage.Shares().Get(nil, valPubKey) + 
require.Nil(t, valShare) + + // Check that validator was registered + shares = eh.nodeStorage.Shares().List(nil) + require.Equal(t, sharesCountBeforeTest, len(shares)) + // and nonce was bumped + nonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) + require.NoError(t, err) + require.Equal(t, currentNonce+1, nonce) + }) + + t.Run("test ClusterLiquidated + ClusterReactivated events handling", func(t *testing.T) { + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests + valPubKey := validatorData2.masterPubKey.Serialize() + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + + require.NotNil(t, share) + require.False(t, share.Liquidated) + _, err = boundContract.SimcontractTransactor.Liquidate( + auth, + testAddr, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + _, err = boundContract.SimcontractTransactor.Reactivate( + auth, + []uint64{1, 2, 3, 4}, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e688"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0xc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b6859"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) + }) }) } -func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, operator *testOperator, useMockCtrl bool) (*EventHandler, *mocks.MockController, error) { +func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, network *networkconfig.NetworkConfig, operator *testOperator, useMockCtrl bool) (*EventHandler, *mocks.MockController, error) { db, err := kv.NewInMemory(logger, basedb.Options{ Ctx: ctx, }) @@ -584,9 +1172,14 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op storageMap := ibftstorage.NewStores() nodeStorage, operatorData := setupOperatorStorage(logger, db, operator) - testNetworkConfig := networkconfig.TestNetwork - keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, testNetworkConfig, true, "") + if network == nil { + network = &networkconfig.NetworkConfig{ + Beacon: utils.SetupMockBeaconNetwork(t, &utils.SlotValue{}), + } + } + + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, *network, true, "") if err != nil { return nil, nil, err } @@ -607,7 +1200,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - testNetworkConfig.Domain, + network.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -633,6 +1226,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op KeyManager: keyManager, StorageMap: storageMap, OperatorData: operatorData, + ValidatorsMap: validatorsmap.New(ctx), }) contractFilterer, err := 
contract.NewContractFilterer(ethcommon.Address{}, nil) @@ -644,7 +1238,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - testNetworkConfig.Domain, + network.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -660,7 +1254,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *testOperator) (operatorstorage.Storage, *registrystorage.OperatorData) { if operator == nil { - logger.Fatal("empty test operator was passed", zap.Error(fmt.Errorf("empty test operator was passed"))) + logger.Fatal("empty test operator was passed") } nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) @@ -668,9 +1262,9 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *test logger.Fatal("failed to create node storage", zap.Error(err)) } - operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.priv)) + operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.rsaPriv)) if err != nil { - logger.Fatal("could not setup operator private key", zap.Error(err)) + logger.Fatal("couldn't setup operator private key", zap.Error(err)) } _, found, err := nodeStorage.GetPrivateKey() @@ -681,7 +1275,7 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *test operatorData, found, err = nodeStorage.GetOperatorDataByPubKey(nil, operatorPubKey) if err != nil { - logger.Fatal("could not get operator data by public key", zap.Error(err)) + logger.Fatal("couldn't get operator data by public key", zap.Error(err)) } if !found { operatorData = ®istrystorage.OperatorData{ @@ -704,20 +1298,22 @@ func unmarshalLog(t *testing.T, rawOperatorAdded string) ethtypes.Log { return vLogOperatorAdded } -func simTestBackend(testAddr ethcommon.Address) *simulator.SimulatedBackend { +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { + genesis := core.GenesisAlloc{} + + for _, testAddr := range testAddresses { + genesis[*testAddr] = core.GenesisAccount{Balance: big.NewInt(10000000000000000)} + } + return simulator.NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000000000)}, - }, 10000000, + genesis, 50_000_000, ) } func TestCreatingSharesData(t *testing.T) { - owner := testAddr nonce := 0 - // - ops, err := createOperators(4) + ops, err := createOperators(4, 1) require.NoError(t, err) validatorData, err := createNewValidator(ops) @@ -742,7 +1338,7 @@ func TestCreatingSharesData(t *testing.T) { encryptedKeys := splitBytes(sharesData[pubKeysOffset:], len(sharesData[pubKeysOffset:])/operatorCount) for i, enck := range encryptedKeys { - priv, err := rsaencryption.ConvertPemToPrivateKey(string(ops[i].priv)) + priv, err := rsaencryption.ConvertPemToPrivateKey(string(ops[i].rsaPriv)) require.NoError(t, err) decryptedSharePrivateKey, err := rsaencryption.DecodeKey(priv, enck) require.NoError(t, err) @@ -763,9 +1359,9 @@ type testValidatorData struct { } type testOperator struct { - id uint64 - pub []byte // rsa pub - priv []byte // rsa sk + id uint64 + rsaPub []byte + rsaPriv []byte } type testShare struct { @@ -774,24 +1370,32 @@ type testShare struct { pub *bls.PublicKey } +func shareExist(accounts []ekmcore.ValidatorAccount, sharePubKey []byte) bool { + for _, acc := range accounts { + if bytes.Equal(acc.ValidatorPublicKey(), sharePubKey) { + return true + } + } + 
return false +} + func createNewValidator(ops []*testOperator) (*testValidatorData, error) { validatorData := &testValidatorData{} sharesCount := uint64(len(ops)) threshold.Init() - msk, pubk := blskeygen.GenBLSKeyPair() + msk, mpk := blskeygen.GenBLSKeyPair() secVec := msk.GetMasterSecretKey(int(sharesCount)) - pubks := bls.GetMasterPublicKey(secVec) + pubKeys := bls.GetMasterPublicKey(secVec) splitKeys, err := threshold.Create(msk.Serialize(), sharesCount-1, sharesCount) if err != nil { return nil, err } - num := uint64(len(ops)) - validatorData.operatorsShares = make([]*testShare, num) + validatorData.operatorsShares = make([]*testShare, sharesCount) - // derive a `hareCount` number of shares - for i := uint64(1); i <= num; i++ { + // derive a `sharesCount` number of shares + for i := uint64(1); i <= sharesCount; i++ { validatorData.operatorsShares[i-1] = &testShare{ opId: i, sec: splitKeys[i], @@ -800,54 +1404,54 @@ func createNewValidator(ops []*testOperator) (*testValidatorData, error) { } validatorData.masterKey = msk - validatorData.masterPubKey = pubk - validatorData.masterPublicKeys = pubks + validatorData.masterPubKey = mpk + validatorData.masterPublicKeys = pubKeys return validatorData, nil } -func createOperators(num uint64) ([]*testOperator, error) { - testops := make([]*testOperator, num) +func createOperators(num uint64, idOffset uint64) ([]*testOperator, error) { + testOps := make([]*testOperator, num) for i := uint64(1); i <= num; i++ { pb, sk, err := rsaencryption.GenerateKeys() if err != nil { return nil, err } - testops[i-1] = &testOperator{ - id: i, - pub: pb, - priv: sk, + testOps[i-1] = &testOperator{ + id: idOffset + i, + rsaPub: pb, + rsaPriv: sk, } } - return testops, nil + return testOps, nil } func generateSharesData(validatorData *testValidatorData, operators []*testOperator, owner ethcommon.Address, nonce int) ([]byte, error) { - var pubkeys []byte + var pubKeys []byte var encryptedShares []byte for i, op := range operators { - rsakey, err := rsaencryption.ConvertPemToPublicKey(op.pub) + rsaKey, err := rsaencryption.ConvertPemToPublicKey(op.rsaPub) if err != nil { - return nil, fmt.Errorf("cant convert publickey: %w", err) + return nil, fmt.Errorf("can't convert public key: %w", err) } - rawshare := validatorData.operatorsShares[i].sec.SerializeToHexStr() - ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, rsakey, []byte(rawshare)) + rawShare := validatorData.operatorsShares[i].sec.SerializeToHexStr() + cipherText, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(rawShare)) if err != nil { - return nil, errors.New("cant encrypt share") + return nil, fmt.Errorf("can't encrypt share: %w", err) } - rsapriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.priv)) + rsaPriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.rsaPriv)) if err != nil { - return nil, err + return nil, fmt.Errorf("can't convert secret key to a private key share: %w", err) } // check that we encrypt right shareSecret := &bls.SecretKey{} - decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsapriv, ciphertext) + decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsaPriv, cipherText) if err != nil { return nil, err } @@ -855,22 +1459,56 @@ func generateSharesData(validatorData *testValidatorData, operators []*testOpera return nil, err } - pubkeys = append(pubkeys, validatorData.operatorsShares[i].pub.Serialize()...) - encryptedShares = append(encryptedShares, ciphertext...) + pubKeys = append(pubKeys, validatorData.operatorsShares[i].pub.Serialize()...) 
+ encryptedShares = append(encryptedShares, cipherText...) } - tosign := fmt.Sprintf("%s:%d", owner.String(), nonce) - msghash := crypto.Keccak256([]byte(tosign)) - signed := validatorData.masterKey.Sign(string(msghash)) + toSign := fmt.Sprintf("%s:%d", owner.String(), nonce) + msgHash := crypto.Keccak256([]byte(toSign)) + signed := validatorData.masterKey.Sign(string(msgHash)) sig := signed.Serialize() - if !signed.VerifyByte(validatorData.masterPubKey, msghash) { - return nil, errors.New("couldn't sign correctly") + if !signed.VerifyByte(validatorData.masterPubKey, msgHash) { + return nil, errors.New("can't sign correctly") } - sharesData := append(pubkeys, encryptedShares...) + sharesData := append(pubKeys, encryptedShares...) sharesDataSigned := append(sig, sharesData...) return sharesDataSigned, nil } + +func requireKeyManagerDataToExist(t *testing.T, eh *EventHandler, expectedAccounts int, validatorData *testValidatorData) { + sharePubKey := validatorData.operatorsShares[0].sec.GetPublicKey().Serialize() + accounts, err := eh.keyManager.(ekm.StorageProvider).ListAccounts() + require.NoError(t, err) + require.Equal(t, expectedAccounts, len(accounts)) + require.True(t, shareExist(accounts, sharePubKey)) + + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + + _, found, err = eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) +} + +func requireKeyManagerDataToNotExist(t *testing.T, eh *EventHandler, expectedAccounts int, validatorData *testValidatorData) { + sharePubKey := validatorData.operatorsShares[0].sec.GetPublicKey().Serialize() + accounts, err := eh.keyManager.(ekm.StorageProvider).ListAccounts() + require.NoError(t, err) + require.Equal(t, expectedAccounts, len(accounts)) + require.False(t, shareExist(accounts, sharePubKey)) + + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, highestAttestation) + + _, found, err = eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.False(t, found) +} diff --git a/eth/eventhandler/handlers.go b/eth/eventhandler/handlers.go index 7c25d7e6f4..d4632ddf6f 100644 --- a/eth/eventhandler/handlers.go +++ b/eth/eventhandler/handlers.go @@ -12,6 +12,7 @@ import ( "github.com/herumi/bls-eth-go-binary/bls" "go.uber.org/zap" + "github.com/bloxapp/ssv/ekm" "github.com/bloxapp/ssv/eth/contract" "github.com/bloxapp/ssv/logging/fields" qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" @@ -39,10 +40,10 @@ var ( func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.ContractOperatorAdded) error { logger := eh.logger.With( - zap.String("event_type", OperatorAdded), + fields.EventName(OperatorAdded), fields.TxHash(event.Raw.TxHash), fields.OperatorID(event.OperatorId), - zap.String("owner_address", event.Owner.String()), + fields.Owner(event.Owner), fields.OperatorPubKey(event.PublicKey), ) logger.Debug("processing event") @@ -85,7 +86,7 @@ func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.Cont func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.ContractOperatorRemoved) error { logger := eh.logger.With( - zap.String("event_type", OperatorRemoved), + 
fields.EventName(OperatorRemoved), fields.TxHash(event.Raw.TxHash), fields.OperatorID(event.OperatorId), ) @@ -101,8 +102,8 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co } logger = logger.With( - zap.String("operator_pub_key", ethcommon.Bytes2Hex(od.PublicKey)), - zap.String("owner_address", od.OwnerAddress.String()), + fields.OperatorPubKey(od.PublicKey), + fields.Owner(od.OwnerAddress), ) // TODO: In original handler we didn't delete operator data, so this behavior was preserved. However we likely need to. @@ -124,10 +125,10 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co func (eh *EventHandler) handleValidatorAdded(txn basedb.Txn, event *contract.ContractValidatorAdded) (ownShare *ssvtypes.SSVShare, err error) { logger := eh.logger.With( - zap.String("event_type", ValidatorAdded), + fields.EventName(ValidatorAdded), fields.TxHash(event.Raw.TxHash), fields.Owner(event.Owner), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.OperatorIDs(event.OperatorIds), fields.Validator(event.PublicKey), ) @@ -324,12 +325,12 @@ func validatorAddedEventToShare( return &validatorShare, shareSecret, nil } -func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.ContractValidatorRemoved) ([]byte, error) { +func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.ContractValidatorRemoved) (spectypes.ValidatorPK, error) { logger := eh.logger.With( - zap.String("event_type", ValidatorRemoved), + fields.EventName(ValidatorRemoved), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), fields.PubKey(event.PublicKey), ) logger.Debug("processing event") @@ -372,6 +373,11 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C logger = logger.With(zap.String("validator_pubkey", hex.EncodeToString(share.ValidatorPubKey))) } if isOperatorShare { + err = eh.keyManager.RemoveShare(hex.EncodeToString(share.SharePubKey)) + if err != nil { + return nil, fmt.Errorf("could not remove share from ekm storage: %w", err) + } + eh.metrics.ValidatorRemoved(event.PublicKey) logger.Debug("processed event") return share.ValidatorPubKey, nil @@ -383,10 +389,10 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C func (eh *EventHandler) handleClusterLiquidated(txn basedb.Txn, event *contract.ContractClusterLiquidated) ([]*ssvtypes.SSVShare, error) { logger := eh.logger.With( - zap.String("event_type", ClusterLiquidated), + fields.EventName(ClusterLiquidated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), ) logger.Debug("processing event") @@ -405,10 +411,10 @@ func (eh *EventHandler) handleClusterLiquidated(txn basedb.Txn, event *contract. 
func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract.ContractClusterReactivated) ([]*ssvtypes.SSVShare, error) { logger := eh.logger.With( - zap.String("event_type", ClusterReactivated), + fields.EventName(ClusterReactivated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), ) logger.Debug("processing event") @@ -417,6 +423,13 @@ func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract return nil, fmt.Errorf("could not process cluster event: %w", err) } + // bump slashing protection for operator reactivated validators + for _, share := range toReactivate { + if err := eh.keyManager.(ekm.StorageProvider).BumpSlashingProtection(share.SharePubKey); err != nil { + return nil, fmt.Errorf("could not bump slashing protection: %w", err) + } + } + if len(enabledPubKeys) > 0 { logger = logger.With(zap.Strings("enabled_validators", enabledPubKeys)) } @@ -427,9 +440,9 @@ func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract func (eh *EventHandler) handleFeeRecipientAddressUpdated(txn basedb.Txn, event *contract.ContractFeeRecipientAddressUpdated) (bool, error) { logger := eh.logger.With( - zap.String("event_type", FeeRecipientAddressUpdated), + fields.EventName(FeeRecipientAddressUpdated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), + fields.Owner(event.Owner), fields.FeeRecipient(event.RecipientAddress.Bytes()), ) logger.Debug("processing event") diff --git a/eth/eventhandler/local_events_test.go b/eth/eventhandler/local_events_test.go index 7697c79363..fda1ae0080 100644 --- a/eth/eventhandler/local_events_test.go +++ b/eth/eventhandler/local_events_test.go @@ -18,7 +18,7 @@ import ( func TestHandleLocalEvent(t *testing.T) { // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) t.Run("correct OperatorAdded event", func(t *testing.T) { @@ -46,7 +46,7 @@ func TestHandleLocalEvent(t *testing.T) { defer cancel() logger := zaptest.NewLogger(t) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } @@ -73,7 +73,7 @@ func TestHandleLocalEvent(t *testing.T) { defer cancel() logger := zaptest.NewLogger(t) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } diff --git a/eth/eventhandler/task.go b/eth/eventhandler/task.go index 3e825140b8..f6e2894fa8 100644 --- a/eth/eventhandler/task.go +++ b/eth/eventhandler/task.go @@ -1,9 +1,10 @@ package eventhandler import ( + spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" - ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/protocol/v2/types" ) type Task interface { @@ -11,15 +12,15 @@ type Task interface { } type startValidatorExecutor interface { - StartValidator(share *ssvtypes.SSVShare) error + StartValidator(share *types.SSVShare) error } type StartValidatorTask struct { executor startValidatorExecutor - share *ssvtypes.SSVShare + share *types.SSVShare } -func NewStartValidatorTask(executor startValidatorExecutor, share *ssvtypes.SSVShare) *StartValidatorTask { +func NewStartValidatorTask(executor startValidatorExecutor, share 
*types.SSVShare) *StartValidatorTask { return &StartValidatorTask{ executor: executor, share: share, @@ -31,41 +32,41 @@ func (t StartValidatorTask) Execute() error { } type stopValidatorExecutor interface { - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error } type StopValidatorTask struct { - executor stopValidatorExecutor - publicKey []byte + executor stopValidatorExecutor + pubKey spectypes.ValidatorPK } -func NewStopValidatorTask(executor stopValidatorExecutor, publicKey []byte) *StopValidatorTask { +func NewStopValidatorTask(executor stopValidatorExecutor, pubKey spectypes.ValidatorPK) *StopValidatorTask { return &StopValidatorTask{ - executor: executor, - publicKey: publicKey, + executor: executor, + pubKey: pubKey, } } func (t StopValidatorTask) Execute() error { - return t.executor.StopValidator(t.publicKey) + return t.executor.StopValidator(t.pubKey) } type liquidateClusterExecutor interface { - LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error + LiquidateCluster(owner ethcommon.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error } type LiquidateClusterTask struct { executor liquidateClusterExecutor owner ethcommon.Address - operatorIDs []uint64 - toLiquidate []*ssvtypes.SSVShare + operatorIDs []spectypes.OperatorID + toLiquidate []*types.SSVShare } func NewLiquidateClusterTask( executor liquidateClusterExecutor, owner ethcommon.Address, - operatorIDs []uint64, - toLiquidate []*ssvtypes.SSVShare, + operatorIDs []spectypes.OperatorID, + toLiquidate []*types.SSVShare, ) *LiquidateClusterTask { return &LiquidateClusterTask{ executor: executor, @@ -80,21 +81,21 @@ func (t LiquidateClusterTask) Execute() error { } type reactivateClusterExecutor interface { - ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error + ReactivateCluster(owner ethcommon.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error } type ReactivateClusterTask struct { executor reactivateClusterExecutor owner ethcommon.Address - operatorIDs []uint64 - toReactivate []*ssvtypes.SSVShare + operatorIDs []spectypes.OperatorID + toReactivate []*types.SSVShare } func NewReactivateClusterTask( executor reactivateClusterExecutor, owner ethcommon.Address, - operatorIDs []uint64, - toReactivate []*ssvtypes.SSVShare, + operatorIDs []spectypes.OperatorID, + toReactivate []*types.SSVShare, ) *ReactivateClusterTask { return &ReactivateClusterTask{ executor: executor, diff --git a/eth/eventhandler/task_executor_test.go b/eth/eventhandler/task_executor_test.go index 8792aadc91..a735c53dc9 100644 --- a/eth/eventhandler/task_executor_test.go +++ b/eth/eventhandler/task_executor_test.go @@ -3,9 +3,10 @@ package eventhandler import ( "context" "encoding/binary" - "github.com/golang/mock/gomock" "testing" + "github.com/golang/mock/gomock" + spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -48,10 +49,10 @@ func TestExecuteTask(t *testing.T) { defer cancel() // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) - eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, ops[0], true) + eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, nil, ops[0], true) require.NoError(t, err) t.Run("test AddValidator task execution - not started", func(t *testing.T) { @@ 
-145,10 +146,10 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) { defer cancel() // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } @@ -189,7 +190,7 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) { } happyFlow := []string{ "successfully setup operator keys", - "setting validator controller", + "setting up validator controller", "malformed event: failed to verify signature", "processed events from block", } diff --git a/eth/eventsyncer/event_syncer_test.go b/eth/eventsyncer/event_syncer_test.go index 4cd2e73e68..9b500fe091 100644 --- a/eth/eventsyncer/event_syncer_test.go +++ b/eth/eventsyncer/event_syncer_test.go @@ -11,6 +11,7 @@ import ( "github.com/bloxapp/ssv/eth/contract" "github.com/bloxapp/ssv/eth/simulator" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/ethereum/go-ethereum/accounts/abi" @@ -152,6 +153,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger) *e DB: db, RegistryStorage: nodeStorage, OperatorData: operatorData, + ValidatorsMap: validatorsmap.New(ctx), }) contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) diff --git a/eth/executionclient/execution_client_test.go b/eth/executionclient/execution_client_test.go index 823515c52b..4fed0795c3 100644 --- a/eth/executionclient/execution_client_test.go +++ b/eth/executionclient/execution_client_test.go @@ -67,7 +67,7 @@ func TestFetchHistoricalLogs(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) parsed, _ := abi.JSON(strings.NewReader(callableAbi)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) @@ -131,7 +131,7 @@ func TestStreamLogs(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -215,7 +215,7 @@ func TestFetchLogsInBatches(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -325,7 +325,7 @@ func TestChainReorganizationLogs(t *testing.T) { // defer rpcServer.Stop() // defer httpsrv.Close() - // addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + // addr := httpToWebSocketURL(httpsrv.URL) // // 1. 
// parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -417,7 +417,7 @@ func TestSimSSV(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) parsed, _ := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) @@ -584,3 +584,7 @@ func TestSimSSV(t *testing.T) { require.NoError(t, client.Close()) require.NoError(t, sim.Close()) } + +func httpToWebSocketURL(url string) string { + return "ws:" + strings.TrimPrefix(url, "http:") +} diff --git a/eth/simulator/simcontract/simcontract.go b/eth/simulator/simcontract/simcontract.go index 9da8921e7a..2877c65b29 100644 --- a/eth/simulator/simcontract/simcontract.go +++ b/eth/simulator/simcontract/simcontract.go @@ -41,7 +41,7 @@ type CallableCluster struct { // SimcontractMetaData contains all meta data concerning the Simcontract contract. var SimcontractMetaData = &bind.MetaData{ ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ClusterLiquidated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ClusterReactivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"recipientAddress\",\"type\":\"address\"}],\"name\":\"FeeRecipientAddressUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"OperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"}],\"name\":\"OperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexe
d\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"shares\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ValidatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ValidatorRemoved\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"clusterOwner\",\"type\":\"address\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"liquidate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"reactivate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"registerOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\
"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"internalType\":\"bytes\",\"name\":\"sharesData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"registerValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"}],\"name\":\"removeOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"removeValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipientAddress\",\"type\":\"address\"}],\"name\":\"setFeeRecipientAddress\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x608060405260008060006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555034801561003957600080fd5b50610f40806100496000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80635fec6dd01161005b5780635fec6dd0146100d6578063bf0f2fb2146100f2578063dbcdc2cc1461010e578063ff212c5c1461012a5761007d565b806306e8fb9c1461008257806312b3fc191461009e5780632e168e0e146100ba575b600080fd5b61009c60048036038101906100979190610740565b610146565b005b6100b860048036038101906100b3919061086f565b6101a7565b005b6100d460048036038101906100cf9190610904565b610204565b005b6100f060048036038101906100eb9190610931565b61023e565b005b61010c60048036038101906101079190610a03565b610296565b005b61012860048036038101906101239190610a72565b6102eb565b005b610144600480360381019061013f9190610a9f565b61033c565b005b3373ffffffffffffffffffffffffffffffffffffffff167f48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e586898988888760405161019696959493929190610c9f565b60405180910390a250505050505050565b3373ffffffffffffffffffffffffffffffffffffffff167fccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e84848888866040516101f5959493929190610d89565b60405180910390a25050505050565b8067ffffffffffffffff167f0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e60405160405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167fc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b685985858460405161028893929190610dd2565b60405180910390a250505050565b8273ffffffffffffffffffffffffffffffffffffffff167f1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e68883836040516102de929190610e04565b60405180910390a2505050565b3373ffffffffffffffffffffffffffffffffffffffff167f259235c230d57def1521657e7c7951d3b385e76193378bc87ef6b56bc2ec3548826040516103319190610e43565b60405180910390a250565b60016000808282829054906101000a900467ffffffffffffffff166103619190610e8d565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055503373ffffffffffffffffffffffffffffffffffffffff1660008054906101000a900467ffffffffffffffff1667ffffffffffffffff167fd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f48585856040516103f093929190610ed8565b60405180910390a3505050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f84011261043657610435610411565b5b8235905067ffffffffffffffff81111561045357610452610416565b5b60208301915083600182028301111561046f5761046e61041b565b5b9250929050565b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6104bf82610476565b810181811067ffffffffffffffff821117156104de576104dd610487565b5b80604052505050565b60006104f16103fd565b90506104fd82826104b6565b919050565b600067ffffffffffffffff82111561051d5761051c610487565b5b602082029050602081019050919050565b600067ffffffffffffffff82169050919050565b61054b8161052e565b811461055657600080fd5b50565b60008135905061056881610542565b92915050565b600061058161057c84610502565b6104e7565b905080838252602082019050602084028301858111156105a4576105a361041b565b5b835b818110156105cd57806105b98882610559565b8452602084019350506020810190506105a6565b5050509392505050565b600082601f8301126105ec576105eb610411565b5b81356105fc84826020860161056e565b91505092915050565b6000819050919050565b61061881610605565b811461062357600080fd5b50565b6000813590506106358161060f565b92915050565b600080fd5b600063ffffffff82169050919050565b61065981610640565b811461066457600080fd5b50565b60008135905061067681610650565b92915050565b60008115159050919050565b6106918161067c565b811461069c57600080fd5b50565b60008135905061
06ae81610688565b92915050565b600060a082840312156106ca576106c961063b565b5b6106d460a06104e7565b905060006106e484828501610667565b60008301525060206106f884828501610559565b602083015250604061070c84828501610559565b60408301525060606107208482850161069f565b606083015250608061073484828501610626565b60808301525092915050565b6000806000806000806000610120888a0312156107605761075f610407565b5b600088013567ffffffffffffffff81111561077e5761077d61040c565b5b61078a8a828b01610420565b9750975050602088013567ffffffffffffffff8111156107ad576107ac61040c565b5b6107b98a828b016105d7565b955050604088013567ffffffffffffffff8111156107da576107d961040c565b5b6107e68a828b01610420565b945094505060606107f98a828b01610626565b925050608061080a8a828b016106b4565b91505092959891949750929550565b60008083601f84011261082f5761082e610411565b5b8235905067ffffffffffffffff81111561084c5761084b610416565b5b6020830191508360208202830111156108685761086761041b565b5b9250929050565b600080600080600060e0868803121561088b5761088a610407565b5b600086013567ffffffffffffffff8111156108a9576108a861040c565b5b6108b588828901610420565b9550955050602086013567ffffffffffffffff8111156108d8576108d761040c565b5b6108e488828901610819565b935093505060406108f7888289016106b4565b9150509295509295909350565b60006020828403121561091a57610919610407565b5b600061092884828501610559565b91505092915050565b60008060008060e0858703121561094b5761094a610407565b5b600085013567ffffffffffffffff8111156109695761096861040c565b5b61097587828801610819565b9450945050602061098887828801610626565b9250506040610999878288016106b4565b91505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006109d0826109a5565b9050919050565b6109e0816109c5565b81146109eb57600080fd5b50565b6000813590506109fd816109d7565b92915050565b600080600060e08486031215610a1c57610a1b610407565b5b6000610a2a868287016109ee565b935050602084013567ffffffffffffffff811115610a4b57610a4a61040c565b5b610a57868287016105d7565b9250506040610a68868287016106b4565b9150509250925092565b600060208284031215610a8857610a87610407565b5b6000610a96848285016109ee565b91505092915050565b600080600060408486031215610ab857610ab7610407565b5b600084013567ffffffffffffffff811115610ad657610ad561040c565b5b610ae286828701610420565b93509350506020610af586828701610626565b9150509250925092565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610b348161052e565b82525050565b6000610b468383610b2b565b60208301905092915050565b6000602082019050919050565b6000610b6a82610aff565b610b748185610b0a565b9350610b7f83610b1b565b8060005b83811015610bb0578151610b978882610b3a565b9750610ba283610b52565b925050600181019050610b83565b5085935050505092915050565b600082825260208201905092915050565b82818337600083830152505050565b6000610be98385610bbd565b9350610bf6838584610bce565b610bff83610476565b840190509392505050565b610c1381610640565b82525050565b610c228161067c565b82525050565b610c3181610605565b82525050565b60a082016000820151610c4d6000850182610c0a565b506020820151610c606020850182610b2b565b506040820151610c736040850182610b2b565b506060820151610c866060850182610c19565b506080820151610c996080850182610c28565b50505050565b6000610100820190508181036000830152610cba8189610b5f565b90508181036020830152610ccf818789610bdd565b90508181036040830152610ce4818587610bdd565b9050610cf36060830184610c37565b979650505050505050565b6000819050919050565b6000610d176020840184610559565b905092915050565b6000602082019050919050565b6000610d388385610b0a565b9350610d4382610cfe565b8060005b85811015610d7c57610d598284610d08565b610d638882610b3a565b9750610d6e83610d1f565b925050600181019050610d47565b5085925050509392505050565b600060e0820190508181036000830
152610da4818789610d2c565b90508181036020830152610db9818587610bdd565b9050610dc86040830184610c37565b9695505050505050565b600060c0820190508181036000830152610ded818587610d2c565b9050610dfc6020830184610c37565b949350505050565b600060c0820190508181036000830152610e1e8185610b5f565b9050610e2d6020830184610c37565b9392505050565b610e3d816109c5565b82525050565b6000602082019050610e586000830184610e34565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610e988261052e565b9150610ea38361052e565b9250828201905067ffffffffffffffff811115610ec357610ec2610e5e565b5b92915050565b610ed281610605565b82525050565b60006040820190508181036000830152610ef3818587610bdd565b9050610f026020830184610ec9565b94935050505056fea2646970667358221220a849e84b21b5cf14144f9145592d2e879b8dfd174c980e9d839aabab095d209064736f6c63430008120033", + Bin: "0x608060405260008060006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555034801561003957600080fd5b50610f40806100496000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80635fec6dd01161005b5780635fec6dd0146100d6578063bf0f2fb2146100f2578063dbcdc2cc1461010e578063ff212c5c1461012a5761007d565b806306e8fb9c1461008257806312b3fc191461009e5780632e168e0e146100ba575b600080fd5b61009c60048036038101906100979190610740565b610146565b005b6100b860048036038101906100b3919061086f565b6101a7565b005b6100d460048036038101906100cf9190610904565b610204565b005b6100f060048036038101906100eb9190610931565b61023e565b005b61010c60048036038101906101079190610a03565b610296565b005b61012860048036038101906101239190610a72565b6102eb565b005b610144600480360381019061013f9190610a9f565b61033c565b005b3373ffffffffffffffffffffffffffffffffffffffff167f48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e586898988888760405161019696959493929190610c9f565b60405180910390a250505050505050565b3373ffffffffffffffffffffffffffffffffffffffff167fccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e84848888866040516101f5959493929190610d89565b60405180910390a25050505050565b8067ffffffffffffffff167f0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e60405160405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167fc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b685985858460405161028893929190610dd2565b60405180910390a250505050565b3373ffffffffffffffffffffffffffffffffffffffff167f1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e68883836040516102de929190610e04565b60405180910390a2505050565b3373ffffffffffffffffffffffffffffffffffffffff167f259235c230d57def1521657e7c7951d3b385e76193378bc87ef6b56bc2ec3548826040516103319190610e43565b60405180910390a250565b60016000808282829054906101000a900467ffffffffffffffff166103619190610e8d565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055503373ffffffffffffffffffffffffffffffffffffffff1660008054906101000a900467ffffffffffffffff1667ffffffffffffffff167fd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f48585856040516103f093929190610ed8565b60405180910390a3505050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f84011261043657610435610411565b5b8235905067ffffffffffffffff81111561045357610452610416565b5b60208301915083600182028301111561046f5761046e61041b565b5b9250929050565b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6104bf82610476565b810181811067ffffffffffffffff821117156104de576104dd610487565b5b80604052505050565b60006104f16103fd565b90506104fd8282610
4b6565b919050565b600067ffffffffffffffff82111561051d5761051c610487565b5b602082029050602081019050919050565b600067ffffffffffffffff82169050919050565b61054b8161052e565b811461055657600080fd5b50565b60008135905061056881610542565b92915050565b600061058161057c84610502565b6104e7565b905080838252602082019050602084028301858111156105a4576105a361041b565b5b835b818110156105cd57806105b98882610559565b8452602084019350506020810190506105a6565b5050509392505050565b600082601f8301126105ec576105eb610411565b5b81356105fc84826020860161056e565b91505092915050565b6000819050919050565b61061881610605565b811461062357600080fd5b50565b6000813590506106358161060f565b92915050565b600080fd5b600063ffffffff82169050919050565b61065981610640565b811461066457600080fd5b50565b60008135905061067681610650565b92915050565b60008115159050919050565b6106918161067c565b811461069c57600080fd5b50565b6000813590506106ae81610688565b92915050565b600060a082840312156106ca576106c961063b565b5b6106d460a06104e7565b905060006106e484828501610667565b60008301525060206106f884828501610559565b602083015250604061070c84828501610559565b60408301525060606107208482850161069f565b606083015250608061073484828501610626565b60808301525092915050565b6000806000806000806000610120888a0312156107605761075f610407565b5b600088013567ffffffffffffffff81111561077e5761077d61040c565b5b61078a8a828b01610420565b9750975050602088013567ffffffffffffffff8111156107ad576107ac61040c565b5b6107b98a828b016105d7565b955050604088013567ffffffffffffffff8111156107da576107d961040c565b5b6107e68a828b01610420565b945094505060606107f98a828b01610626565b925050608061080a8a828b016106b4565b91505092959891949750929550565b60008083601f84011261082f5761082e610411565b5b8235905067ffffffffffffffff81111561084c5761084b610416565b5b6020830191508360208202830111156108685761086761041b565b5b9250929050565b600080600080600060e0868803121561088b5761088a610407565b5b600086013567ffffffffffffffff8111156108a9576108a861040c565b5b6108b588828901610420565b9550955050602086013567ffffffffffffffff8111156108d8576108d761040c565b5b6108e488828901610819565b935093505060406108f7888289016106b4565b9150509295509295909350565b60006020828403121561091a57610919610407565b5b600061092884828501610559565b91505092915050565b60008060008060e0858703121561094b5761094a610407565b5b600085013567ffffffffffffffff8111156109695761096861040c565b5b61097587828801610819565b9450945050602061098887828801610626565b9250506040610999878288016106b4565b91505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006109d0826109a5565b9050919050565b6109e0816109c5565b81146109eb57600080fd5b50565b6000813590506109fd816109d7565b92915050565b600080600060e08486031215610a1c57610a1b610407565b5b6000610a2a868287016109ee565b935050602084013567ffffffffffffffff811115610a4b57610a4a61040c565b5b610a57868287016105d7565b9250506040610a68868287016106b4565b9150509250925092565b600060208284031215610a8857610a87610407565b5b6000610a96848285016109ee565b91505092915050565b600080600060408486031215610ab857610ab7610407565b5b600084013567ffffffffffffffff811115610ad657610ad561040c565b5b610ae286828701610420565b93509350506020610af586828701610626565b9150509250925092565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610b348161052e565b82525050565b6000610b468383610b2b565b60208301905092915050565b6000602082019050919050565b6000610b6a82610aff565b610b748185610b0a565b9350610b7f83610b1b565b8060005b83811015610bb0578151610b978882610b3a565b9750610ba283610b52565b925050600181019050610b83565b5085935050505092915050565b600082825260208201905092915050565b82818337600083830152505050565b6000610be98385610bbd565b9350
610bf6838584610bce565b610bff83610476565b840190509392505050565b610c1381610640565b82525050565b610c228161067c565b82525050565b610c3181610605565b82525050565b60a082016000820151610c4d6000850182610c0a565b506020820151610c606020850182610b2b565b506040820151610c736040850182610b2b565b506060820151610c866060850182610c19565b506080820151610c996080850182610c28565b50505050565b6000610100820190508181036000830152610cba8189610b5f565b90508181036020830152610ccf818789610bdd565b90508181036040830152610ce4818587610bdd565b9050610cf36060830184610c37565b979650505050505050565b6000819050919050565b6000610d176020840184610559565b905092915050565b6000602082019050919050565b6000610d388385610b0a565b9350610d4382610cfe565b8060005b85811015610d7c57610d598284610d08565b610d638882610b3a565b9750610d6e83610d1f565b925050600181019050610d47565b5085925050509392505050565b600060e0820190508181036000830152610da4818789610d2c565b90508181036020830152610db9818587610bdd565b9050610dc86040830184610c37565b9695505050505050565b600060c0820190508181036000830152610ded818587610d2c565b9050610dfc6020830184610c37565b949350505050565b600060c0820190508181036000830152610e1e8185610b5f565b9050610e2d6020830184610c37565b9392505050565b610e3d816109c5565b82525050565b6000602082019050610e586000830184610e34565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610e988261052e565b9150610ea38361052e565b9250828201905067ffffffffffffffff811115610ec357610ec2610e5e565b5b92915050565b610ed281610605565b82525050565b60006040820190508181036000830152610ef3818587610bdd565b9050610f026020830184610ec9565b94935050505056fea26469706673582212206464f7d32909b03e1e16f822f4ba73e56f9b875dfda6cb13f3fc97c182c5e43664736f6c63430008120033", } // SimcontractABI is the input ABI used to generate the binding from. 
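The regenerated binding above pairs with the reformatted simcontract.sol below; the new Bin blob reflects the Solidity change that keys ClusterLiquidated to msg.sender. For orientation, here is a minimal sketch of how such an abigen binding is typically deployed against go-ethereum's simulated backend. DeploySimcontract and RegisterOperator are abigen's conventional generated names (assumed here rather than quoted from this patch); chain ID 1337 matches the transactors in execution_client_test.go, while the balance and gas limit are arbitrary test values:

	package main

	import (
		"log"
		"math/big"

		"github.com/ethereum/go-ethereum/accounts/abi/bind"
		"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
		"github.com/ethereum/go-ethereum/core"
		"github.com/ethereum/go-ethereum/crypto"

		"github.com/bloxapp/ssv/eth/simulator/simcontract"
	)

	func main() {
		// Deployer key and transactor; 1337 is the simulated chain ID.
		key, err := crypto.GenerateKey()
		if err != nil {
			log.Fatal(err)
		}
		auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
		if err != nil {
			log.Fatal(err)
		}

		// Fund the deployer and start a simulated chain (values are arbitrary).
		sim := backends.NewSimulatedBackend(core.GenesisAlloc{
			auth.From: {Balance: big.NewInt(1e18)},
		}, 8_000_000)
		defer sim.Close()

		// DeploySimcontract is the constructor abigen emits when MetaData.Bin is set.
		addr, _, contract, err := simcontract.DeploySimcontract(auth, sim)
		if err != nil {
			log.Fatal(err)
		}
		sim.Commit() // mine the deployment transaction

		// Exercise a method from the ABI above; this emits OperatorAdded.
		if _, err := contract.RegisterOperator(auth, []byte("operator-pubkey"), big.NewInt(1)); err != nil {
			log.Fatal(err)
		}
		sim.Commit()

		log.Printf("simcontract deployed at %s", addr.Hex())
	}

The tests in this patch go through the repo's own eth/simulator package rather than backends directly, but the deployment flow is analogous.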
diff --git a/eth/simulator/simcontract/simcontract.sol b/eth/simulator/simcontract/simcontract.sol index 23277e23e2..9325802822 100644 --- a/eth/simulator/simcontract/simcontract.sol +++ b/eth/simulator/simcontract/simcontract.sol @@ -52,20 +52,43 @@ contract Callable { _operatorId += 1; emit OperatorAdded(_operatorId, msg.sender, publicKey, fee); } - function removeOperator(uint64 operatorId) public {emit OperatorRemoved(operatorId);} + + function removeOperator(uint64 operatorId) public { + emit OperatorRemoved(operatorId); + } + function registerValidator( bytes calldata publicKey, uint64[] memory operatorIds, bytes calldata sharesData, uint256 amount, Cluster memory cluster - ) public { emit ValidatorAdded(msg.sender, operatorIds, publicKey, sharesData, cluster);} + ) public { + emit ValidatorAdded(msg.sender, operatorIds, publicKey, sharesData, cluster); + } + function removeValidator( bytes calldata publicKey, uint64[] calldata operatorIds, Cluster memory cluster - ) public {emit ValidatorRemoved(msg.sender, operatorIds, publicKey, cluster);} - function liquidate(address clusterOwner, uint64[] memory operatorIds, Cluster memory cluster) public {emit ClusterLiquidated(clusterOwner, operatorIds, cluster);} - function reactivate(uint64[] calldata operatorIds, uint256 amount, Cluster memory cluster) public {emit ClusterReactivated(msg.sender, operatorIds, cluster);} + ) public { + emit ValidatorRemoved(msg.sender, operatorIds, publicKey, cluster); + } + + function liquidate(address clusterOwner, + uint64[] memory operatorIds, + Cluster memory cluster + ) public { + emit ClusterLiquidated(msg.sender, operatorIds, cluster); + } + + function reactivate( + uint64[] calldata operatorIds, + uint256 amount, + Cluster memory cluster + ) public { + emit ClusterReactivated(msg.sender, operatorIds, cluster); + } + function setFeeRecipientAddress(address recipientAddress) public {emit FeeRecipientAddressUpdated(msg.sender, recipientAddress);} } diff --git a/go.mod b/go.mod index 5fa7730cf6..b39d5e0cc9 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/aquasecurity/table v1.8.0 github.com/attestantio/go-eth2-client v0.16.3 github.com/bloxapp/eth2-key-manager v1.3.1 - github.com/bloxapp/ssv-spec v0.3.1 + github.com/bloxapp/ssv-spec v0.3.3 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/xxhash/v2 v2.2.0 github.com/cornelk/hashmap v1.0.8 @@ -37,6 +37,7 @@ require ( github.com/sourcegraph/conc v0.3.0 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 + github.com/wealdtech/go-eth2-types/v2 v2.8.1 github.com/wealdtech/go-eth2-util v1.8.1 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3 go.uber.org/multierr v1.11.0 @@ -192,7 +193,6 @@ require ( github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/urfave/cli/v2 v2.24.1 // indirect github.com/wealdtech/go-bytesutil v1.2.1 // indirect - github.com/wealdtech/go-eth2-types/v2 v2.8.1 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect @@ -223,4 +223,5 @@ replace github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 replace github.com/dgraph-io/ristretto => github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f -replace github.com/bloxapp/ssv-spec => github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800 +//TODO remove this replace when the following PR is merged 
https://github.com/bloxapp/eth2-key-manager/pull/100 +replace github.com/bloxapp/eth2-key-manager => github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 diff --git a/go.sum b/go.sum index 7b8753260f..cf4040a7be 100644 --- a/go.sum +++ b/go.sum @@ -54,10 +54,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bloxapp/eth2-key-manager v1.3.1 h1:1olQcOHRY2TN1o8JX9AN1siEIJXWnlM+BlknfBbXoo4= -github.com/bloxapp/eth2-key-manager v1.3.1/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= -github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800 h1:ikChvdYVw4GFSlnIS+u1qmNqOvgq2a2H3b2FZ44KBn8= -github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= +github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 h1:vjrMmMH15Bo0QF+228CuEZvCI+OuPyJRco82Gj/WyTI= +github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= +github.com/bloxapp/ssv-spec v0.3.3 h1:iNomqWQjxDDQouHMjl27PmH1hUolJ4u8QQ+HX/TQQcg= +github.com/bloxapp/ssv-spec v0.3.3/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= diff --git a/integration/qbft/tests/msg_router.go b/integration/qbft/tests/msg_router.go index bf3b667e98..dda7b7c243 100644 --- a/integration/qbft/tests/msg_router.go +++ b/integration/qbft/tests/msg_router.go @@ -1,21 +1,26 @@ package tests import ( - spectypes "github.com/bloxapp/ssv-spec/types" - protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" + "context" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" + protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" ) type msgRouter struct { + logger *zap.Logger validator *protocolvalidator.Validator } -func (m *msgRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { - m.validator.HandleMessage(logger, &message) +func (m *msgRouter) Route(_ context.Context, message *queue.DecodedSSVMessage) { + m.validator.HandleMessage(m.logger, message) } -func newMsgRouter(v *protocolvalidator.Validator) *msgRouter { +func newMsgRouter(logger *zap.Logger, v *protocolvalidator.Validator) *msgRouter { return &msgRouter{ validator: v, + logger: logger, } } diff --git a/integration/qbft/tests/round_change_test.go b/integration/qbft/tests/round_change_test.go index 65c6038e5f..4dbb839f5f 100644 --- a/integration/qbft/tests/round_change_test.go +++ b/integration/qbft/tests/round_change_test.go @@ -2,12 +2,13 @@ package tests import ( "testing" + "time" "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" - protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" "github.com/stretchr/testify/require" + + protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) func TestRoundChange4CommitteeScenario(t *testing.T) { @@ -18,8 +19,8 @@ func 
TestRoundChange4CommitteeScenario(t *testing.T) { Duties: map[spectypes.OperatorID]DutyProperties{ 2: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: NoDelay}, 1: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: NoDelay}, - 3: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: roundtimer.RoundTimeout(1)}, - 4: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: roundtimer.RoundTimeout(1)}, + 3: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: 2 * time.Second}, + 4: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: 2 * time.Second}, }, ValidationFunctions: map[spectypes.OperatorID]func(*testing.T, int, *protocolstorage.StoredInstance){ 1: roundChangeValidator(), diff --git a/integration/qbft/tests/scenario_test.go b/integration/qbft/tests/scenario_test.go index 5fbf6c89b9..e803fd9616 100644 --- a/integration/qbft/tests/scenario_test.go +++ b/integration/qbft/tests/scenario_test.go @@ -2,7 +2,6 @@ package tests import ( "context" - "fmt" "testing" "time" @@ -21,11 +20,9 @@ import ( "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/validator" protocolbeacon "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/sync/handlers" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" @@ -63,15 +60,6 @@ func (s *Scenario) Run(t *testing.T, role spectypes.BeaconRole) { for id := 1; id <= s.Committee; id++ { id := spectypes.OperatorID(id) s.validators[id] = createValidator(t, ctx, id, getKeySet(s.Committee), logger, s.shared.Nodes[id]) - - stores := newStores(logger) - s.shared.Nodes[id].RegisterHandlers(logger, protocolp2p.WithHandler( - protocolp2p.LastDecidedProtocol, - handlers.LastDecidedHandler(logger.Named(fmt.Sprintf("decided-handler-%d", id)), stores, s.shared.Nodes[id]), - ), protocolp2p.WithHandler( - protocolp2p.DecidedHistoryProtocol, - handlers.HistoryHandler(logger.Named(fmt.Sprintf("history-handler-%d", id)), stores, s.shared.Nodes[id], 25), - )) } //invoking duties @@ -84,7 +72,7 @@ func (s *Scenario) Run(t *testing.T, role spectypes.BeaconRole) { copy(pk[:], getKeySet(s.Committee).ValidatorPK.Serialize()) ssvMsg, err := validator.CreateDutyExecuteMsg(duty, pk, networkconfig.TestNetwork.Domain) require.NoError(t, err) - dec, err := queue.DecodeSSVMessage(logger, ssvMsg) + dec, err := queue.DecodeSSVMessage(ssvMsg) require.NoError(t, err) s.validators[id].Queues[role].Q.Push(dec) @@ -218,7 +206,7 @@ func createValidator(t *testing.T, pCtx context.Context, id spectypes.OperatorID options.DutyRunners = validator.SetupRunners(ctx, logger, options) val := protocolvalidator.NewValidator(ctx, cancel, options) - node.UseMessageRouter(newMsgRouter(val)) + node.UseMessageRouter(newMsgRouter(logger, val)) started, err := val.Start(logger) require.NoError(t, err) require.True(t, started) diff --git a/logging/fields/fields.go b/logging/fields/fields.go index 6b1de4ffc5..3584f07915 100644 --- a/logging/fields/fields.go +++ b/logging/fields/fields.go @@ -64,6 +64,7 @@ const ( FieldName = "name" FieldNetwork = "network" FieldOperatorId = "operator_id" + FieldOperatorIDs = "operator_ids" FieldOperatorPubKey = "operator_pubkey" FieldOwnerAddress = "owner_address" FieldPeerID = "peer_id" @@ -190,6 +191,10 @@ func OperatorID(operatorId spectypes.OperatorID) zap.Field { 
return zap.Uint64(FieldOperatorId, operatorId) } +func OperatorIDs(operatorIDs []spectypes.OperatorID) zap.Field { + return zap.Uint64s(FieldOperatorIDs, operatorIDs) +} + func OperatorIDStr(operatorId string) zap.Field { return zap.String(FieldOperatorId, operatorId) } diff --git a/logging/names.go b/logging/names.go index 5a23d12da9..298f6a9ee0 100644 --- a/logging/names.go +++ b/logging/names.go @@ -23,4 +23,5 @@ const ( NamePubsubTrace = "PubsubTrace" NameScoreInspector = "ScoreInspector" NameEventHandler = "EventHandler" + NameDutyFetcher = "DutyFetcher" ) diff --git a/logging/testing.go b/logging/testing.go index 6b6abd8326..b7617c2680 100644 --- a/logging/testing.go +++ b/logging/testing.go @@ -5,16 +5,17 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) func TestLogger(t *testing.T) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", nil) + err := SetGlobalLogger(zapcore.DebugLevel.String(), "capital", "console", nil) require.NoError(t, err) return zap.L().Named(t.Name()) } func BenchLogger(b *testing.B) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", nil) + err := SetGlobalLogger(zapcore.DebugLevel.String(), "capital", "console", nil) require.NoError(b, err) return zap.L().Named(b.Name()) } diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go new file mode 100644 index 0000000000..6bdf023fc4 --- /dev/null +++ b/message/validation/consensus_validation.go @@ -0,0 +1,434 @@ +package validation + +// consensus_validation.go contains methods for validating consensus messages + +import ( + "bytes" + "encoding/hex" + "fmt" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "golang.org/x/exp/slices" + + "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +func (mv *messageValidator) validateConsensusMessage( + share *ssvtypes.SSVShare, + signedMsg *specqbft.SignedMessage, + messageID spectypes.MessageID, + receivedAt time.Time, +) (ConsensusDescriptor, phase0.Slot, error) { + var consensusDescriptor ConsensusDescriptor + + if mv.inCommittee(share) { + mv.metrics.InCommitteeMessage(spectypes.SSVConsensusMsgType, mv.isDecidedMessage(signedMsg)) + } else { + mv.metrics.NonCommitteeMessage(spectypes.SSVConsensusMsgType, mv.isDecidedMessage(signedMsg)) + } + + msgSlot := phase0.Slot(signedMsg.Message.Height) + msgRound := signedMsg.Message.Round + + consensusDescriptor = ConsensusDescriptor{ + QBFTMessageType: signedMsg.Message.MsgType, + Round: msgRound, + Signers: signedMsg.Signers, + Committee: share.Committee, + } + + mv.metrics.ConsensusMsgType(signedMsg.Message.MsgType, len(signedMsg.Signers)) + + if messageID.GetRoleType() == spectypes.BNRoleValidatorRegistration { + return consensusDescriptor, msgSlot, ErrConsensusValidatorRegistration + } + + if err := mv.validateSignatureFormat(signedMsg.Signature); err != nil { + return consensusDescriptor, msgSlot, err + } + + if !mv.validQBFTMsgType(signedMsg.Message.MsgType) { + return consensusDescriptor, msgSlot, ErrUnknownQBFTMessageType + } + + if err := mv.validConsensusSigners(share, signedMsg); err != nil { + return consensusDescriptor, msgSlot, err + } + + role := messageID.GetRoleType() + + if err := mv.validateSlotTime(msgSlot, role, receivedAt); err != nil { + return 
consensusDescriptor, msgSlot, err + } + + if maxRound := mv.maxRound(role); msgRound > maxRound { + err := ErrRoundTooHigh + err.got = fmt.Sprintf("%v (%v role)", msgRound, role) + err.want = fmt.Sprintf("%v (%v role)", maxRound, role) + return consensusDescriptor, msgSlot, err + } + + slotStartTime := mv.netCfg.Beacon.GetSlotStartTime(msgSlot) /*. + Add(mv.waitAfterSlotStart(role))*/ // TODO: not supported yet because first round is non-deterministic now + + sinceSlotStart := time.Duration(0) + estimatedRound := specqbft.FirstRound + if receivedAt.After(slotStartTime) { + sinceSlotStart = receivedAt.Sub(slotStartTime) + estimatedRound = mv.currentEstimatedRound(sinceSlotStart) + } + + // TODO: lowestAllowed is not supported yet because first round is non-deterministic now + lowestAllowed := /*estimatedRound - allowedRoundsInPast*/ specqbft.FirstRound + highestAllowed := estimatedRound + allowedRoundsInFuture + + if msgRound < lowestAllowed || msgRound > highestAllowed { + err := ErrEstimatedRoundTooFar + err.got = fmt.Sprintf("%v (%v role)", msgRound, role) + err.want = fmt.Sprintf("between %v and %v (%v role) / %v passed", lowestAllowed, highestAllowed, role, sinceSlotStart) + return consensusDescriptor, msgSlot, err + } + + if mv.hasFullData(signedMsg) { + hashedFullData, err := specqbft.HashDataRoot(signedMsg.FullData) + if err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("hash data root: %w", err) + } + + if hashedFullData != signedMsg.Message.Root { + return consensusDescriptor, msgSlot, ErrInvalidHash + } + } + + if err := mv.validateBeaconDuty(messageID.GetRoleType(), msgSlot, share); err != nil { + return consensusDescriptor, msgSlot, err + } + + state := mv.consensusState(messageID) + for _, signer := range signedMsg.Signers { + if err := mv.validateSignerBehaviorConsensus(state, signer, share, messageID, signedMsg); err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("bad signer behavior: %w", err) + } + } + + if mv.verifySignatures { + if err := ssvtypes.VerifyByOperators(signedMsg.Signature, signedMsg, mv.netCfg.Domain, spectypes.QBFTSignatureType, share.Committee); err != nil { + signErr := ErrInvalidSignature + signErr.innerErr = err + signErr.got = fmt.Sprintf("domain %v from %v", hex.EncodeToString(mv.netCfg.Domain[:]), hex.EncodeToString(share.ValidatorPubKey)) + return consensusDescriptor, msgSlot, signErr + } + } + + for _, signer := range signedMsg.Signers { + signerState := state.GetSignerState(signer) + if signerState == nil { + signerState = state.CreateSignerState(signer) + } + if msgSlot > signerState.Slot { + newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) + signerState.ResetSlot(msgSlot, msgRound, newEpoch) + } else if msgSlot == signerState.Slot && msgRound > signerState.Round { + signerState.ResetRound(msgRound) + } + + if mv.hasFullData(signedMsg) && signerState.ProposalData == nil { + signerState.ProposalData = signedMsg.FullData + } + + signerState.MessageCounts.RecordConsensusMessage(signedMsg) + } + + return consensusDescriptor, msgSlot, nil +} + +func (mv *messageValidator) validateJustifications( + share *ssvtypes.SSVShare, + signedMsg *specqbft.SignedMessage, +) error { + pj, err := signedMsg.Message.GetPrepareJustifications() + if err != nil { + e := ErrMalformedPrepareJustifications + e.innerErr = err + return e + } + + if len(pj) != 0 && signedMsg.Message.MsgType != specqbft.ProposalMsgType { + e := ErrUnexpectedPrepareJustifications + e.got = 
signedMsg.Message.MsgType + return e + } + + rcj, err := signedMsg.Message.GetRoundChangeJustifications() + if err != nil { + e := ErrMalformedRoundChangeJustifications + e.innerErr = err + return e + } + + if len(rcj) != 0 && signedMsg.Message.MsgType != specqbft.ProposalMsgType && signedMsg.Message.MsgType != specqbft.RoundChangeMsgType { + e := ErrUnexpectedRoundChangeJustifications + e.got = signedMsg.Message.MsgType + return e + } + + if signedMsg.Message.MsgType == specqbft.ProposalMsgType { + cfg := newQBFTConfig(mv.netCfg.Domain, mv.verifySignatures) + + if err := instance.IsProposalJustification( + cfg, + share, + rcj, + pj, + signedMsg.Message.Height, + signedMsg.Message.Round, + signedMsg.FullData, + ); err != nil { + e := ErrInvalidJustifications + e.innerErr = err + return e + } + } + + return nil +} + +func (mv *messageValidator) validateSignerBehaviorConsensus( + state *ConsensusState, + signer spectypes.OperatorID, + share *ssvtypes.SSVShare, + msgID spectypes.MessageID, + signedMsg *specqbft.SignedMessage, +) error { + signerState := state.GetSignerState(signer) + + if signerState == nil { + return mv.validateJustifications(share, signedMsg) + } + + msgSlot := phase0.Slot(signedMsg.Message.Height) + msgRound := signedMsg.Message.Round + + if msgSlot < signerState.Slot { + // Signers aren't allowed to decrease their slot. + // If they've sent a future message due to clock error, + // this should be caught by the earlyMessage check. + err := ErrSlotAlreadyAdvanced + err.want = signerState.Slot + err.got = msgSlot + return err + } + + if msgSlot == signerState.Slot && msgRound < signerState.Round { + // Signers aren't allowed to decrease their round. + // If they've sent a future message due to clock error, + // they'd have to wait for the next slot/round to be accepted. 
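+		// (For example, once slot 100 / round 3 has been recorded for a signer,
+		// slot 100 / round 2 is rejected here, while slot 100 / round 4 or any
+		// round of slot 101 would pass these two checks.)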
+		err := ErrRoundAlreadyAdvanced
+		err.want = signerState.Round
+		err.got = msgRound
+		return err
+	}
+
+	newDutyInSameEpoch := false
+	if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) {
+		newDutyInSameEpoch = true
+	}
+
+	if err := mv.validateDutyCount(signerState, msgID, newDutyInSameEpoch); err != nil {
+		return err
+	}
+
+	if !(msgSlot > signerState.Slot || (msgSlot == signerState.Slot && msgRound > signerState.Round)) {
+		if mv.hasFullData(signedMsg) && signerState.ProposalData != nil && !bytes.Equal(signerState.ProposalData, signedMsg.FullData) {
+			return ErrDuplicatedProposalWithDifferentData
+		}
+
+		limits := maxMessageCounts(len(share.Committee))
+		if err := signerState.MessageCounts.ValidateConsensusMessage(signedMsg, limits); err != nil {
+			return err
+		}
+	}
+
+	return mv.validateJustifications(share, signedMsg)
+}
+
+func (mv *messageValidator) validateDutyCount(
+	state *SignerState,
+	msgID spectypes.MessageID,
+	newDutyInSameEpoch bool,
+) error {
+	switch msgID.GetRoleType() {
+	case spectypes.BNRoleAttester, spectypes.BNRoleAggregator, spectypes.BNRoleValidatorRegistration:
+		limit := maxDutiesPerEpoch
+
+		if sameSlot := !newDutyInSameEpoch; sameSlot {
+			limit++
+		}
+
+		if state.EpochDuties >= limit {
+			err := ErrTooManyDutiesPerEpoch
+			err.got = fmt.Sprintf("%v (role %v)", state.EpochDuties, msgID.GetRoleType())
+			err.want = fmt.Sprintf("less than %v", limit)
+			return err
+		}
+
+		return nil
+	}
+
+	return nil
+}
+
+func (mv *messageValidator) validateBeaconDuty(
+	role spectypes.BeaconRole,
+	slot phase0.Slot,
+	share *ssvtypes.SSVShare,
+) error {
+	switch role {
+	case spectypes.BNRoleProposer:
+		if share.Metadata.BeaconMetadata == nil {
+			return ErrNoShareMetadata
+		}
+
+		epoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(slot)
+		if mv.dutyStore != nil && mv.dutyStore.Proposer.ValidatorDuty(epoch, slot, share.Metadata.BeaconMetadata.Index) == nil {
+			return ErrNoDuty
+		}
+
+		return nil
+
+	case spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution:
+		if share.Metadata.BeaconMetadata == nil {
+			return ErrNoShareMetadata
+		}
+
+		period := mv.netCfg.Beacon.EstimatedSyncCommitteePeriodAtEpoch(mv.netCfg.Beacon.EstimatedEpochAtSlot(slot))
+		if mv.dutyStore != nil && mv.dutyStore.SyncCommittee.Duty(period, share.Metadata.BeaconMetadata.Index) == nil {
+			return ErrNoDuty
+		}
+
+		return nil
+	}
+
+	return nil
+}
+
+func (mv *messageValidator) hasFullData(signedMsg *specqbft.SignedMessage) bool {
+	return (signedMsg.Message.MsgType == specqbft.ProposalMsgType ||
+		signedMsg.Message.MsgType == specqbft.RoundChangeMsgType ||
+		mv.isDecidedMessage(signedMsg)) && len(signedMsg.FullData) != 0 // TODO: more complex check of FullData
+}
+
+func (mv *messageValidator) isDecidedMessage(signedMsg *specqbft.SignedMessage) bool {
+	return signedMsg.Message.MsgType == specqbft.CommitMsgType && len(signedMsg.Signers) > 1
+}
+
+func (mv *messageValidator) maxRound(role spectypes.BeaconRole) specqbft.Round {
+	switch role {
+	case spectypes.BNRoleAttester, spectypes.BNRoleAggregator: // TODO: check if the value for aggregator is correct, as there are messages on stage exceeding the limit
+		return 12 // TODO: consider calculating based on quick timeout and slow timeout
+	case spectypes.BNRoleProposer, spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution:
+		return 6
+	case spectypes.BNRoleValidatorRegistration:
+		return 0
+	default:
+		panic("unknown role")
+	}
+}
+
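+// Illustrative arithmetic for currentEstimatedRound below, assuming
+// QuickTimeout = 2s (cf. the 2 * time.Second delays in round_change_test.go
+// above), QuickTimeoutThreshold = 8 (cf. consensus_validation_test.go below)
+// and SlowTimeout = 2m: the estimate advances one round per QuickTimeout up
+// to the threshold, then one round per SlowTimeout.
+//
+//	sinceSlotStart = 5s  -> FirstRound + 5s/2s      = round 3
+//	sinceSlotStart = 16s -> quick rounds exhausted  = round 9
+//	sinceSlotStart = 3m  -> 9 + (3m - 16s)/2m       = round 10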
+func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) specqbft.Round { + if currentQuickRound := specqbft.FirstRound + specqbft.Round(sinceSlotStart/roundtimer.QuickTimeout); currentQuickRound <= roundtimer.QuickTimeoutThreshold { + return currentQuickRound + } + + sinceFirstSlowRound := sinceSlotStart - (time.Duration(roundtimer.QuickTimeoutThreshold) * roundtimer.QuickTimeout) + estimatedRound := roundtimer.QuickTimeoutThreshold + specqbft.FirstRound + specqbft.Round(sinceFirstSlowRound/roundtimer.SlowTimeout) + return estimatedRound +} + +func (mv *messageValidator) waitAfterSlotStart(role spectypes.BeaconRole) time.Duration { + switch role { + case spectypes.BNRoleAttester, spectypes.BNRoleSyncCommittee: + return mv.netCfg.Beacon.SlotDurationSec() / 3 + case spectypes.BNRoleAggregator, spectypes.BNRoleSyncCommitteeContribution: + return mv.netCfg.Beacon.SlotDurationSec() / 3 * 2 + case spectypes.BNRoleProposer, spectypes.BNRoleValidatorRegistration: + return 0 + default: + panic("unknown role") + } +} + +func (mv *messageValidator) validRole(roleType spectypes.BeaconRole) bool { + switch roleType { + case spectypes.BNRoleAttester, + spectypes.BNRoleAggregator, + spectypes.BNRoleProposer, + spectypes.BNRoleSyncCommittee, + spectypes.BNRoleSyncCommitteeContribution, + spectypes.BNRoleValidatorRegistration: + return true + } + return false +} + +func (mv *messageValidator) validQBFTMsgType(msgType specqbft.MessageType) bool { + switch msgType { + case specqbft.ProposalMsgType, specqbft.PrepareMsgType, specqbft.CommitMsgType, specqbft.RoundChangeMsgType: + return true + } + return false +} + +func (mv *messageValidator) validConsensusSigners(share *ssvtypes.SSVShare, m *specqbft.SignedMessage) error { + switch { + case len(m.Signers) == 0: + return ErrNoSigners + + case len(m.Signers) == 1: + if m.Message.MsgType == specqbft.ProposalMsgType { + qbftState := &specqbft.State{ + Height: m.Message.Height, + Share: &share.Share, + } + leader := specqbft.RoundRobinProposer(qbftState, m.Message.Round) + if m.Signers[0] != leader { + err := ErrSignerNotLeader + err.got = m.Signers[0] + err.want = leader + return err + } + } + + case m.Message.MsgType != specqbft.CommitMsgType: + e := ErrNonDecidedWithMultipleSigners + e.got = len(m.Signers) + return e + + case !share.HasQuorum(len(m.Signers)) || len(m.Signers) > len(share.Committee): + e := ErrWrongSignersLength + e.want = fmt.Sprintf("between %v and %v", share.Quorum, len(share.Committee)) + e.got = len(m.Signers) + return e + } + + if !slices.IsSorted(m.Signers) { + return ErrSignersNotSorted + } + + var prevSigner spectypes.OperatorID + for _, signer := range m.Signers { + if err := mv.commonSignerValidation(signer, share); err != nil { + return err + } + if signer == prevSigner { + return ErrDuplicatedSigner + } + prevSigner = signer + } + return nil +} diff --git a/message/validation/consensus_validation_test.go b/message/validation/consensus_validation_test.go new file mode 100644 index 0000000000..5f0ae02df1 --- /dev/null +++ b/message/validation/consensus_validation_test.go @@ -0,0 +1,104 @@ +package validation + +import ( + "testing" + "time" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" +) + +func TestMessageValidator_currentEstimatedRound(t *testing.T) { + tt := []struct { + name string + sinceSlotStart time.Duration + want specqbft.Round + }{ + { + name: "0s - expected first round", + sinceSlotStart: 0, + want: 
specqbft.FirstRound, + }, + { + name: "QuickTimeout/2 - expected first round", + sinceSlotStart: roundtimer.QuickTimeout / 2, + want: specqbft.FirstRound, + }, + { + name: "QuickTimeout - expected first+1 round", + sinceSlotStart: roundtimer.QuickTimeout, + want: specqbft.FirstRound + 1, + }, + { + name: "QuickTimeout*2 - expected first+2 round", + sinceSlotStart: roundtimer.QuickTimeout * 2, + want: specqbft.FirstRound + 2, + }, + { + name: "QuickTimeout*3 - expected first+3 round", + sinceSlotStart: roundtimer.QuickTimeout * 3, + want: specqbft.FirstRound + 3, + }, + { + name: "QuickTimeout*4 - expected first+4 round", + sinceSlotStart: roundtimer.QuickTimeout * 4, + want: specqbft.FirstRound + 4, + }, + { + name: "QuickTimeout*5 - expected first+5 round", + sinceSlotStart: roundtimer.QuickTimeout * 5, + want: specqbft.FirstRound + 5, + }, + { + name: "QuickTimeout*6 - expected first+6 round", + sinceSlotStart: roundtimer.QuickTimeout * 6, + want: specqbft.FirstRound + 6, + }, + { + name: "QuickTimeout*7 - expected first+7 round", + sinceSlotStart: roundtimer.QuickTimeout * 7, + want: specqbft.FirstRound + 7, + }, + { + name: "QuickTimeout*8 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * 8, + want: specqbft.FirstRound + 8, + }, + { + name: "QuickTimeout*9 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+1), + want: roundtimer.QuickTimeoutThreshold + 1, + }, + { + name: "QuickTimeout*10 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+2), + want: roundtimer.QuickTimeoutThreshold + 1, + }, + { + name: "(QuickTimeout*8 + SlowTimeout) - expected first+9 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout, + want: roundtimer.QuickTimeoutThreshold + 2, + }, + { + name: "(QuickTimeout*8 + SlowTimeout*2) - expected first+10 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*2, + want: roundtimer.QuickTimeoutThreshold + 3, + }, + { + name: "(QuickTimeout*8 + SlowTimeout*3) - expected first+11 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*3, + want: roundtimer.QuickTimeoutThreshold + 4, + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + mv := &messageValidator{} + got := mv.currentEstimatedRound(tc.sinceSlotStart) + require.Equal(t, tc.want, got) + }) + } +} diff --git a/message/validation/errors.go b/message/validation/errors.go new file mode 100644 index 0000000000..f27d3b4901 --- /dev/null +++ b/message/validation/errors.go @@ -0,0 +1,100 @@ +package validation + +import ( + "fmt" + "strings" +) + +type Error struct { + text string + got any + want any + innerErr error + reject bool + silent bool +} + +func (e Error) Error() string { + var sb strings.Builder + sb.WriteString(e.text) + + if e.got != nil { + sb.WriteString(fmt.Sprintf(", got %v", e.got)) + } + if e.want != nil { + sb.WriteString(fmt.Sprintf(", want %v", e.want)) + } + if e.innerErr != nil { + sb.WriteString(fmt.Sprintf(": %s", e.innerErr.Error())) + } + + return sb.String() +} + +func (e Error) Reject() bool { + return e.reject +} + +func (e Error) Silent() bool { + return e.silent +} + +func (e Error) Text() string { + return e.text +} + +var ( + ErrEmptyData = Error{text: "empty data"} + ErrWrongDomain = Error{text: 
"wrong domain"} + ErrNoShareMetadata = Error{text: "share has no metadata"} + ErrUnknownValidator = Error{text: "unknown validator"} + ErrValidatorLiquidated = Error{text: "validator is liquidated"} + ErrValidatorNotAttesting = Error{text: "validator is not attesting"} + ErrSlotAlreadyAdvanced = Error{text: "signer has already advanced to a later slot"} + ErrRoundAlreadyAdvanced = Error{text: "signer has already advanced to a later round"} + ErrRoundTooHigh = Error{text: "round is too high for this role" /*, reject: true*/} // TODO: enable reject + ErrEarlyMessage = Error{text: "early message"} + ErrLateMessage = Error{text: "late message"} + ErrTooManySameTypeMessagesPerRound = Error{text: "too many messages of same type per round"} + ErrPubSubMessageHasNoData = Error{text: "pub-sub message has no data", reject: true} + ErrPubSubDataTooBig = Error{text: "pub-sub message data too big", reject: true} + ErrMalformedPubSubMessage = Error{text: "pub-sub message is malformed", reject: true} + ErrEmptyPubSubMessage = Error{text: "pub-sub message is empty", reject: true} + ErrTopicNotFound = Error{text: "topic not found", reject: true} + ErrSSVDataTooBig = Error{text: "ssv message data too big", reject: true} + ErrInvalidRole = Error{text: "invalid role", reject: true} + ErrConsensusValidatorRegistration = Error{text: "consensus message for validator registration role", reject: true} + ErrNoSigners = Error{text: "no signers", reject: true} + ErrWrongSignatureSize = Error{text: "wrong signature size", reject: true} + ErrZeroSignature = Error{text: "zero signature", reject: true} + ErrZeroSigner = Error{text: "zero signer ID", reject: true} + ErrSignerNotInCommittee = Error{text: "signer is not in committee", reject: true} + ErrDuplicatedSigner = Error{text: "signer is duplicated", reject: true} + ErrSignerNotLeader = Error{text: "signer is not leader", reject: true} + ErrSignersNotSorted = Error{text: "signers are not sorted", reject: true} + ErrUnexpectedSigner = Error{text: "signer is not expected", reject: true} + ErrInvalidHash = Error{text: "root doesn't match full data hash", reject: true} + ErrInvalidSignature = Error{text: "invalid signature", reject: true} + ErrInvalidPartialSignature = Error{text: "invalid partial signature", reject: true} + ErrEstimatedRoundTooFar = Error{text: "message round is too far from estimated"} + ErrMalformedMessage = Error{text: "message could not be decoded", reject: true} + ErrUnknownSSVMessageType = Error{text: "unknown SSV message type", reject: true} + ErrUnknownQBFTMessageType = Error{text: "unknown QBFT message type", reject: true} + ErrUnknownPartialMessageType = Error{text: "unknown partial signature message type", reject: true} + ErrPartialSignatureTypeRoleMismatch = Error{text: "partial signature type and role don't match", reject: true} + ErrNonDecidedWithMultipleSigners = Error{text: "non-decided with multiple signers", reject: true} + ErrWrongSignersLength = Error{text: "decided signers size is not between quorum and committee size", reject: true} + ErrDuplicatedProposalWithDifferentData = Error{text: "duplicated proposal with different data", reject: true} + ErrEventMessage = Error{text: "event messages are not broadcast", reject: true} + ErrDKGMessage = Error{text: "DKG messages are not supported", reject: true} + ErrMalformedPrepareJustifications = Error{text: "malformed prepare justifications", reject: true} + ErrUnexpectedPrepareJustifications = Error{text: "prepare justifications unexpected for this message type", reject: true} + 
ErrMalformedRoundChangeJustifications = Error{text: "malformed round change justifications", reject: true} + ErrUnexpectedRoundChangeJustifications = Error{text: "round change justifications unexpected for this message type", reject: true} + ErrInvalidJustifications = Error{text: "invalid justifications", reject: true} + ErrTooManyDutiesPerEpoch = Error{text: "too many duties per epoch", reject: true} + ErrNoDuty = Error{text: "no duty for this epoch", reject: true} + ErrDeserializePublicKey = Error{text: "deserialize public key", reject: true} + ErrNoPartialMessages = Error{text: "no partial messages", reject: true} + ErrDuplicatedPartialSignatureMessage = Error{text: "duplicated partial signature message", reject: true} +) diff --git a/message/validation/message_counts.go b/message/validation/message_counts.go new file mode 100644 index 0000000000..609ed018bc --- /dev/null +++ b/message/validation/message_counts.go @@ -0,0 +1,156 @@ +package validation + +// message_counts.go contains code for counting and validating messages per validator-slot-round. + +import ( + "fmt" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" +) + +// MessageCounts tracks the number of various message types received for validation. +type MessageCounts struct { + PreConsensus int + Proposal int + Prepare int + Commit int + Decided int + RoundChange int + PostConsensus int +} + +// String provides a formatted representation of the MessageCounts. +func (c *MessageCounts) String() string { + return fmt.Sprintf("pre-consensus: %v, proposal: %v, prepare: %v, commit: %v, decided: %v, round change: %v, post-consensus: %v", + c.PreConsensus, + c.Proposal, + c.Prepare, + c.Commit, + c.Decided, + c.RoundChange, + c.PostConsensus, + ) +} + +// ValidateConsensusMessage checks if the provided consensus message exceeds the set limits. +// Returns an error if the message type exceeds its respective count limit. +func (c *MessageCounts) ValidateConsensusMessage(msg *specqbft.SignedMessage, limits MessageCounts) error { + switch msg.Message.MsgType { + case specqbft.ProposalMsgType: + if c.Proposal >= limits.Proposal { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("proposal, having %v", c.String()) + return err + } + case specqbft.PrepareMsgType: + if c.Prepare >= limits.Prepare { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("prepare, having %v", c.String()) + return err + } + case specqbft.CommitMsgType: + if len(msg.Signers) == 1 { + if c.Commit >= limits.Commit { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("commit, having %v", c.String()) + return err + } + } + if len(msg.Signers) > 1 { + if c.Decided >= limits.Decided { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("decided, having %v", c.String()) + return err + } + } + case specqbft.RoundChangeMsgType: + if c.RoundChange >= limits.RoundChange { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("round change, having %v", c.String()) + return err + } + default: + panic("unexpected signed message type") // should be checked before + } + + return nil +} + +// ValidatePartialSignatureMessage checks if the provided partial signature message exceeds the set limits. +// Returns an error if the message type exceeds its respective count limit. 
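+// Note: the comparisons here use ">" (unlike ">=" in ValidateConsensusMessage),
+// so one message beyond the stated limit is still accepted before an error is returned.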
+func (c *MessageCounts) ValidatePartialSignatureMessage(m *spectypes.SignedPartialSignatureMessage, limits MessageCounts) error { + switch m.Message.Type { + case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig: + if c.PreConsensus > limits.PreConsensus { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("pre-consensus, having %v", c.String()) + return err + } + case spectypes.PostConsensusPartialSig: + if c.PostConsensus > limits.PostConsensus { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("post-consensus, having %v", c.String()) + return err + } + default: + panic("unexpected partial signature message type") // should be checked before + } + + return nil +} + +// RecordConsensusMessage updates the counts based on the provided consensus message type. +func (c *MessageCounts) RecordConsensusMessage(msg *specqbft.SignedMessage) { + switch msg.Message.MsgType { + case specqbft.ProposalMsgType: + c.Proposal++ + case specqbft.PrepareMsgType: + c.Prepare++ + case specqbft.CommitMsgType: + switch { + case len(msg.Signers) == 1: + c.Commit++ + case len(msg.Signers) > 1: + c.Decided++ + default: + panic("expected signers") // 0 length should be checked before + } + case specqbft.RoundChangeMsgType: + c.RoundChange++ + default: + panic("unexpected signed message type") // should be checked before + } +} + +// RecordPartialSignatureMessage updates the counts based on the provided partial signature message type. +func (c *MessageCounts) RecordPartialSignatureMessage(msg *spectypes.SignedPartialSignatureMessage) { + switch msg.Message.Type { + case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig: + c.PreConsensus++ + case spectypes.PostConsensusPartialSig: + c.PostConsensus++ + default: + panic("unexpected partial signature message type") // should be checked before + } +} + +// maxMessageCounts is the maximum number of acceptable messages from a signer within a slot & round. 
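+// For example, for a 4-operator committee (f = 1) every message type is limited to
+// one per slot/round, except decided, which is limited to maxDecidedCount(4) = 4 * (1 + 1) = 8.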
+func maxMessageCounts(committeeSize int) MessageCounts { + maxDecided := maxDecidedCount(committeeSize) + + return MessageCounts{ + PreConsensus: 1, + Proposal: 1, + Prepare: 1, + Commit: 1, + Decided: maxDecided, + RoundChange: 1, + PostConsensus: 1, + } +} + +func maxDecidedCount(committeeSize int) int { + f := (committeeSize - 1) / 3 + return committeeSize * (f + 1) // N * (f + 1) +} diff --git a/message/validation/metrics.go b/message/validation/metrics.go new file mode 100644 index 0000000000..f023fe0689 --- /dev/null +++ b/message/validation/metrics.go @@ -0,0 +1,38 @@ +package validation + +import ( + "time" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" +) + +type metrics interface { + MessageAccepted(role spectypes.BeaconRole, round specqbft.Round) + MessageIgnored(reason string, role spectypes.BeaconRole, round specqbft.Round) + MessageRejected(reason string, role spectypes.BeaconRole, round specqbft.Round) + SSVMessageType(msgType spectypes.MsgType) + ConsensusMsgType(msgType specqbft.MessageType, signers int) + MessageValidationDuration(duration time.Duration, labels ...string) + SignatureValidationDuration(duration time.Duration, labels ...string) + MessageSize(size int) + ActiveMsgValidation(topic string) + ActiveMsgValidationDone(topic string) + InCommitteeMessage(msgType spectypes.MsgType, decided bool) + NonCommitteeMessage(msgType spectypes.MsgType, decided bool) +} + +type nopMetrics struct{} + +func (*nopMetrics) ConsensusMsgType(specqbft.MessageType, int) {} +func (*nopMetrics) MessageAccepted(spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) MessageIgnored(string, spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) MessageRejected(string, spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) SSVMessageType(spectypes.MsgType) {} +func (*nopMetrics) MessageValidationDuration(time.Duration, ...string) {} +func (*nopMetrics) SignatureValidationDuration(time.Duration, ...string) {} +func (*nopMetrics) MessageSize(int) {} +func (*nopMetrics) ActiveMsgValidation(string) {} +func (*nopMetrics) ActiveMsgValidationDone(string) {} +func (*nopMetrics) InCommitteeMessage(spectypes.MsgType, bool) {} +func (*nopMetrics) NonCommitteeMessage(spectypes.MsgType, bool) {} diff --git a/message/validation/partial_validation.go b/message/validation/partial_validation.go new file mode 100644 index 0000000000..781267f22d --- /dev/null +++ b/message/validation/partial_validation.go @@ -0,0 +1,251 @@ +package validation + +// partial_validation.go contains methods for validating partial signature messages + +import ( + "encoding/hex" + "fmt" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/herumi/bls-eth-go-binary/bls" + "golang.org/x/exp/slices" + + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +func (mv *messageValidator) validatePartialSignatureMessage( + share *ssvtypes.SSVShare, + signedMsg *spectypes.SignedPartialSignatureMessage, + msgID spectypes.MessageID, +) (phase0.Slot, error) { + if mv.inCommittee(share) { + mv.metrics.InCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) + } else { + mv.metrics.NonCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) + } + + msgSlot := signedMsg.Message.Slot + + if !mv.validPartialSigMsgType(signedMsg.Message.Type) { + e := ErrUnknownPartialMessageType + e.got = signedMsg.Message.Type + return msgSlot, e + } + + role := 
msgID.GetRoleType() + if !mv.partialSignatureTypeMatchesRole(signedMsg.Message.Type, role) { + return msgSlot, ErrPartialSignatureTypeRoleMismatch + } + + if err := mv.validatePartialMessages(share, signedMsg); err != nil { + return msgSlot, err + } + + state := mv.consensusState(msgID) + signerState := state.GetSignerState(signedMsg.Signer) + if signerState != nil { + if err := mv.validateSignerBehaviorPartial(state, signedMsg.Signer, share, msgID, signedMsg); err != nil { + return msgSlot, err + } + } + + if err := mv.validateSignatureFormat(signedMsg.Signature); err != nil { + return msgSlot, err + } + + if mv.verifySignatures { + if err := mv.validPartialSignatures(share, signedMsg); err != nil { + return msgSlot, err + } + } + + if signerState == nil { + signerState = state.CreateSignerState(signedMsg.Signer) + } + + if msgSlot > signerState.Slot { + newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) + signerState.ResetSlot(msgSlot, specqbft.FirstRound, newEpoch) + } + + signerState.MessageCounts.RecordPartialSignatureMessage(signedMsg) + + return msgSlot, nil +} + +func (mv *messageValidator) inCommittee(share *ssvtypes.SSVShare) bool { + return slices.ContainsFunc(share.Committee, func(operator *spectypes.Operator) bool { + return operator.OperatorID == mv.ownOperatorID + }) +} + +func (mv *messageValidator) validPartialSigMsgType(msgType spectypes.PartialSigMsgType) bool { + switch msgType { + case spectypes.PostConsensusPartialSig, + spectypes.RandaoPartialSig, + spectypes.SelectionProofPartialSig, + spectypes.ContributionProofs, + spectypes.ValidatorRegistrationPartialSig: + return true + default: + return false + } +} + +func (mv *messageValidator) partialSignatureTypeMatchesRole(msgType spectypes.PartialSigMsgType, role spectypes.BeaconRole) bool { + switch role { + case spectypes.BNRoleAttester: + return msgType == spectypes.PostConsensusPartialSig + case spectypes.BNRoleAggregator: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.SelectionProofPartialSig + case spectypes.BNRoleProposer: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.RandaoPartialSig + case spectypes.BNRoleSyncCommittee: + return msgType == spectypes.PostConsensusPartialSig + case spectypes.BNRoleSyncCommitteeContribution: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.ContributionProofs + case spectypes.BNRoleValidatorRegistration: + return msgType == spectypes.ValidatorRegistrationPartialSig + default: + panic("invalid role") // role validity should be checked before + } +} + +func (mv *messageValidator) validPartialSignatures(share *ssvtypes.SSVShare, signedMsg *spectypes.SignedPartialSignatureMessage) error { + if err := ssvtypes.VerifyByOperators(signedMsg.Signature, signedMsg, mv.netCfg.Domain, spectypes.PartialSignatureType, share.Committee); err != nil { + signErr := ErrInvalidSignature + signErr.innerErr = err + signErr.got = fmt.Sprintf("domain %v from %v", hex.EncodeToString(mv.netCfg.Domain[:]), hex.EncodeToString(share.ValidatorPubKey)) + return signErr + } + + for _, message := range signedMsg.Message.Messages { + if err := mv.verifyPartialSignature(message, share); err != nil { + return err + } + } + + return nil +} + +func (mv *messageValidator) verifyPartialSignature(msg *spectypes.PartialSignatureMessage, share *ssvtypes.SSVShare) error { + signer := msg.Signer + signature := msg.PartialSignature + root := msg.SigningRoot + + for _, n := 
range share.Committee { + if n.GetID() != signer { + continue + } + + pk, err := ssvtypes.DeserializeBLSPublicKey(n.GetPublicKey()) + if err != nil { + return fmt.Errorf("deserialize pk: %w", err) + } + sig := &bls.Sign{} + if err := sig.Deserialize(signature); err != nil { + return fmt.Errorf("deserialize signature: %w", err) + } + + if !mv.aggregateVerify(sig, pk, root) { + return ErrInvalidPartialSignature + } + + return nil + } + + return ErrSignerNotInCommittee +} + +func (mv *messageValidator) aggregateVerify(sig *bls.Sign, pk bls.PublicKey, root [32]byte) bool { + start := time.Now() + + valid := sig.FastAggregateVerify([]bls.PublicKey{pk}, root[:]) + + sinceStart := time.Since(start) + mv.metrics.SignatureValidationDuration(sinceStart) + + return valid +} + +func (mv *messageValidator) validatePartialMessages(share *ssvtypes.SSVShare, m *spectypes.SignedPartialSignatureMessage) error { + if err := mv.commonSignerValidation(m.Signer, share); err != nil { + return err + } + + if len(m.Message.Messages) == 0 { + return ErrNoPartialMessages + } + + seen := map[[32]byte]struct{}{} + for _, message := range m.Message.Messages { + if _, ok := seen[message.SigningRoot]; ok { + return ErrDuplicatedPartialSignatureMessage + } + seen[message.SigningRoot] = struct{}{} + + if message.Signer != m.Signer { + err := ErrUnexpectedSigner + err.want = m.Signer + err.got = message.Signer + return err + } + + if err := mv.commonSignerValidation(message.Signer, share); err != nil { + return err + } + + if err := mv.validateSignatureFormat(message.PartialSignature); err != nil { + return err + } + } + + return nil +} + +func (mv *messageValidator) validateSignerBehaviorPartial( + state *ConsensusState, + signer spectypes.OperatorID, + share *ssvtypes.SSVShare, + msgID spectypes.MessageID, + signedMsg *spectypes.SignedPartialSignatureMessage, +) error { + signerState := state.GetSignerState(signer) + + if signerState == nil { + return nil + } + + msgSlot := signedMsg.Message.Slot + + if msgSlot < signerState.Slot { + // Signers aren't allowed to decrease their slot. + // If they've sent a future message due to clock error, + // this should be caught by the earlyMessage check. 
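+		// Messages for the signer's current slot are not rejected here; they remain
+		// subject to the per-round message-count limits applied below.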
+ err := ErrSlotAlreadyAdvanced + err.want = signerState.Slot + err.got = msgSlot + return err + } + + newDutyInSameEpoch := false + if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) { + newDutyInSameEpoch = true + } + + if err := mv.validateDutyCount(signerState, msgID, newDutyInSameEpoch); err != nil { + return err + } + + if msgSlot <= signerState.Slot { + limits := maxMessageCounts(len(share.Committee)) + if err := signerState.MessageCounts.ValidatePartialSignatureMessage(signedMsg, limits); err != nil { + return err + } + } + + return nil +} diff --git a/message/validation/qbft_config.go b/message/validation/qbft_config.go new file mode 100644 index 0000000000..fe5ed6dc04 --- /dev/null +++ b/message/validation/qbft_config.go @@ -0,0 +1,53 @@ +package validation + +import ( + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" +) + +type qbftConfig struct { + domain spectypes.DomainType + verifySignature bool +} + +func newQBFTConfig(domain spectypes.DomainType, verifySignature bool) qbftConfig { + return qbftConfig{ + domain: domain, + verifySignature: verifySignature, + } +} + +func (q qbftConfig) GetSigner() spectypes.SSVSigner { + panic("should not be called") +} + +func (q qbftConfig) GetSignatureDomainType() spectypes.DomainType { + return q.domain +} + +func (q qbftConfig) GetValueCheckF() specqbft.ProposedValueCheckF { + panic("should not be called") +} + +func (q qbftConfig) GetProposerF() specqbft.ProposerF { + panic("should not be called") +} + +func (q qbftConfig) GetNetwork() specqbft.Network { + panic("should not be called") +} + +func (q qbftConfig) GetStorage() qbftstorage.QBFTStore { + panic("should not be called") +} + +func (q qbftConfig) GetTimer() roundtimer.Timer { + panic("should not be called") +} + +func (q qbftConfig) VerifySignatures() bool { + return q.verifySignature +} diff --git a/message/validation/signer_state.go b/message/validation/signer_state.go new file mode 100644 index 0000000000..dc9bf1818e --- /dev/null +++ b/message/validation/signer_state.go @@ -0,0 +1,45 @@ +package validation + +// signer_state.go describes state of a signer. + +import ( + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" +) + +// SignerState represents the state of a signer, including its start time, slot, round, +// message counts, proposal data, and the number of duties performed in the current epoch. +type SignerState struct { + Start time.Time + Slot phase0.Slot + Round specqbft.Round + MessageCounts MessageCounts + ProposalData []byte + EpochDuties int +} + +// ResetSlot resets the state's slot, round, message counts, and proposal data to the given values. +// It also updates the start time to the current time and increments the epoch duties count if it's a new epoch. +func (s *SignerState) ResetSlot(slot phase0.Slot, round specqbft.Round, newEpoch bool) { + s.Start = time.Now() + s.Slot = slot + s.Round = round + s.MessageCounts = MessageCounts{} + s.ProposalData = nil + if newEpoch { + s.EpochDuties = 1 + } else { + s.EpochDuties++ + } +} + +// ResetRound resets the state's round, message counts, and proposal data to the given values. +// It also updates the start time to the current time. 
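+// Unlike ResetSlot, the slot and the epoch duty counter are left unchanged.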
+func (s *SignerState) ResetRound(round specqbft.Round) {
+	s.Start = time.Now()
+	s.Round = round
+	s.MessageCounts = MessageCounts{}
+	s.ProposalData = nil
+}
diff --git a/message/validation/validation.go b/message/validation/validation.go
new file mode 100644
index 0000000000..98e100fa3c
--- /dev/null
+++ b/message/validation/validation.go
@@ -0,0 +1,556 @@
+// Package validation provides functions and structures for validating messages.
+package validation
+
+// validation.go contains main code for validation and most of the rule checks.
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	specqbft "github.com/bloxapp/ssv-spec/qbft"
+	spectypes "github.com/bloxapp/ssv-spec/types"
+	"github.com/cornelk/hashmap"
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"golang.org/x/exp/slices"
+
+	"github.com/bloxapp/ssv/logging/fields"
+	"github.com/bloxapp/ssv/network/commons"
+	"github.com/bloxapp/ssv/networkconfig"
+	"github.com/bloxapp/ssv/operator/duties/dutystore"
+	ssvmessage "github.com/bloxapp/ssv/protocol/v2/message"
+	"github.com/bloxapp/ssv/protocol/v2/ssv/queue"
+	ssvtypes "github.com/bloxapp/ssv/protocol/v2/types"
+	registrystorage "github.com/bloxapp/ssv/registry/storage"
+)
+
+const (
+	// lateMessageMargin is the duration past a message's TTL in which it is still considered valid.
+	lateMessageMargin = time.Second * 3
+
+	// clockErrorTolerance is the maximum amount of clock error we expect to see between nodes.
+	clockErrorTolerance = time.Millisecond * 50
+
+	maxMessageSize             = maxConsensusMsgSize
+	maxConsensusMsgSize        = 8388608 // 8 MiB
+	maxPartialSignatureMsgSize = 1952
+	allowedRoundsInFuture      = 1
+	allowedRoundsInPast        = 2
+	lateSlotAllowance          = 2
+	signatureSize              = 96 // BLS signature length in bytes
+	maxDutiesPerEpoch          = 2
+)
+
+// ConsensusID uniquely identifies a public key and role pair to keep track of state.
+type ConsensusID struct {
+	PubKey phase0.BLSPubKey
+	Role   spectypes.BeaconRole
+}
+
+// ConsensusState keeps track of the signers for a given public key and role.
+type ConsensusState struct {
+	// TODO: consider evicting old data to avoid excessive memory consumption
+	Signers *hashmap.Map[spectypes.OperatorID, *SignerState]
+}
+
+// GetSignerState retrieves the state for the given signer.
+// Returns nil if the signer is not found.
+func (cs *ConsensusState) GetSignerState(signer spectypes.OperatorID) *SignerState {
+	signerState, _ := cs.Signers.Get(signer)
+	return signerState
+}
+
+// CreateSignerState initializes and sets a new SignerState for the given signer.
+func (cs *ConsensusState) CreateSignerState(signer spectypes.OperatorID) *SignerState {
+	signerState := &SignerState{}
+	cs.Signers.Set(signer, signerState)
+
+	return signerState
+}
+
+// PubsubMessageValidator defines methods for validating pubsub messages.
+type PubsubMessageValidator interface {
+	ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult
+	ValidatePubsubMessage(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult
+}
+
+// SSVMessageValidator defines methods for validating SSV messages.
+type SSVMessageValidator interface {
+	ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error)
+}
+
+// MessageValidator is an interface that combines both PubsubMessageValidator and SSVMessageValidator.
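+// The pubsub methods are meant to be plugged into libp2p's topic validation, while
+// ValidateSSVMessage can be called directly with an already-decoded SSVMessage.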
+type MessageValidator interface { + PubsubMessageValidator + SSVMessageValidator +} + +type messageValidator struct { + logger *zap.Logger + metrics metrics + netCfg networkconfig.NetworkConfig + index sync.Map + shareStorage registrystorage.Shares + dutyStore *dutystore.Store + ownOperatorID spectypes.OperatorID + verifySignatures bool +} + +// NewMessageValidator returns a new MessageValidator with the given network configuration and options. +func NewMessageValidator(netCfg networkconfig.NetworkConfig, opts ...Option) MessageValidator { + mv := &messageValidator{ + logger: zap.NewNop(), + metrics: &nopMetrics{}, + netCfg: netCfg, + } + + for _, opt := range opts { + opt(mv) + } + + return mv +} + +// Option represents a functional option for configuring a messageValidator. +type Option func(validator *messageValidator) + +// WithLogger sets the logger for the messageValidator. +func WithLogger(logger *zap.Logger) Option { + return func(mv *messageValidator) { + mv.logger = logger + } +} + +// WithMetrics sets the metrics for the messageValidator. +func WithMetrics(metrics metrics) Option { + return func(mv *messageValidator) { + mv.metrics = metrics + } +} + +// WithDutyStore sets the duty store for the messageValidator. +func WithDutyStore(dutyStore *dutystore.Store) Option { + return func(mv *messageValidator) { + mv.dutyStore = dutyStore + } +} + +// WithOwnOperatorID sets the operator ID for the messageValidator. +func WithOwnOperatorID(id spectypes.OperatorID) Option { + return func(mv *messageValidator) { + mv.ownOperatorID = id + } +} + +// WithShareStorage sets the share storage for the messageValidator. +func WithShareStorage(shareStorage registrystorage.Shares) Option { + return func(mv *messageValidator) { + mv.shareStorage = shareStorage + } +} + +// WithSignatureVerification sets whether to verify signatures in the messageValidator. +func WithSignatureVerification(check bool) Option { + return func(mv *messageValidator) { + mv.verifySignatures = check + } +} + +// ConsensusDescriptor provides details about the consensus for a message. It's used for logging and metrics. +type ConsensusDescriptor struct { + Round specqbft.Round + QBFTMessageType specqbft.MessageType + Signers []spectypes.OperatorID + Committee []*spectypes.Operator +} + +// Descriptor provides details about a message. It's used for logging and metrics. +type Descriptor struct { + ValidatorPK spectypes.ValidatorPK + Role spectypes.BeaconRole + SSVMessageType spectypes.MsgType + Slot phase0.Slot + Consensus *ConsensusDescriptor +} + +// Fields returns zap logging fields for the descriptor. +func (d Descriptor) Fields() []zapcore.Field { + result := []zapcore.Field{ + fields.Validator(d.ValidatorPK), + fields.Role(d.Role), + zap.String("ssv_message_type", ssvmessage.MsgTypeToString(d.SSVMessageType)), + fields.Slot(d.Slot), + } + + if d.Consensus != nil { + var committee []spectypes.OperatorID + for _, o := range d.Consensus.Committee { + committee = append(committee, o.OperatorID) + } + + result = append(result, + fields.Round(d.Consensus.Round), + zap.String("qbft_message_type", ssvmessage.QBFTMsgTypeToString(d.Consensus.QBFTMessageType)), + zap.Uint64s("signers", d.Consensus.Signers), + zap.Uint64s("committee", committee), + ) + } + + return result +} + +// String provides a string representation of the descriptor. It may be useful for logging. 
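+// The output contains the same information as Fields, flattened into a single line.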
+func (d Descriptor) String() string { + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("validator PK: %v, role: %v, ssv message type: %v, slot: %v", + hex.EncodeToString(d.ValidatorPK), + d.Role.String(), + ssvmessage.MsgTypeToString(d.SSVMessageType), + d.Slot, + )) + + if d.Consensus != nil { + var committee []spectypes.OperatorID + for _, o := range d.Consensus.Committee { + committee = append(committee, o.OperatorID) + } + + sb.WriteString(fmt.Sprintf(", round: %v, qbft message type: %v, signers: %v, committee: %v", + d.Consensus.Round, + ssvmessage.QBFTMsgTypeToString(d.Consensus.QBFTMessageType), + d.Consensus.Signers, + committee, + )) + } + + return sb.String() +} + +// ValidatorForTopic returns a validation function for the given topic. +// This function can be used to validate messages within the libp2p pubsub framework. +func (mv *messageValidator) ValidatorForTopic(_ string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + return mv.ValidatePubsubMessage +} + +// ValidatePubsubMessage validates the given pubsub message. +// Depending on the outcome, it will return one of the pubsub validation results (Accept, Ignore, or Reject). +func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, _ peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + start := time.Now() + var validationDurationLabels []string // TODO: implement + + defer func() { + sinceStart := time.Since(start) + mv.metrics.MessageValidationDuration(sinceStart, validationDurationLabels...) + }() + + decodedMessage, descriptor, err := mv.validateP2PMessage(pmsg, time.Now()) + round := specqbft.Round(0) + if descriptor.Consensus != nil { + round = descriptor.Consensus.Round + } + + if err != nil { + var valErr Error + if errors.As(err, &valErr) { + if valErr.Reject() { + if !valErr.Silent() { + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("rejecting invalid message", f...) + } + + mv.metrics.MessageRejected(valErr.Text(), descriptor.Role, round) + return pubsub.ValidationReject + } + + if !valErr.Silent() { + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("ignoring invalid message", f...) + } + mv.metrics.MessageIgnored(valErr.Text(), descriptor.Role, round) + return pubsub.ValidationIgnore + } + + mv.metrics.MessageIgnored(err.Error(), descriptor.Role, round) + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("ignoring invalid message", f...) + return pubsub.ValidationIgnore + } + + pmsg.ValidatorData = decodedMessage + + mv.metrics.MessageAccepted(descriptor.Role, round) + + return pubsub.ValidationAccept +} + +// ValidateSSVMessage validates the given SSV message. +// If successful, it returns the decoded message and its descriptor. Otherwise, it returns an error. 
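+// The current time is used as the message's receive time for the slot-timing checks.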
+func (mv *messageValidator) ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error) { + return mv.validateSSVMessage(ssvMessage, time.Now()) +} + +func (mv *messageValidator) validateP2PMessage(pMsg *pubsub.Message, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { + topic := pMsg.GetTopic() + + mv.metrics.ActiveMsgValidation(topic) + defer mv.metrics.ActiveMsgValidationDone(topic) + + messageData := pMsg.GetData() + if len(messageData) == 0 { + return nil, Descriptor{}, ErrPubSubMessageHasNoData + } + + mv.metrics.MessageSize(len(messageData)) + + // Max possible MsgType + MsgID + Data plus 10% for encoding overhead + const maxMsgSize = 4 + 56 + 8388668 + const maxEncodedMsgSize = maxMsgSize + maxMsgSize/10 + if len(messageData) > maxEncodedMsgSize { + e := ErrPubSubDataTooBig + e.got = len(messageData) + return nil, Descriptor{}, e + } + + msg, err := commons.DecodeNetworkMsg(messageData) + if err != nil { + e := ErrMalformedPubSubMessage + e.innerErr = err + return nil, Descriptor{}, e + } + + if msg == nil { + return nil, Descriptor{}, ErrEmptyPubSubMessage + } + + // Check if the message was sent on the right topic. + currentTopic := pMsg.GetTopic() + currentTopicBaseName := commons.GetTopicBaseName(currentTopic) + topics := commons.ValidatorTopicID(msg.GetID().GetPubKey()) + + topicFound := false + for _, tp := range topics { + if tp == currentTopicBaseName { + topicFound = true + break + } + } + if !topicFound { + return nil, Descriptor{}, ErrTopicNotFound + } + + mv.metrics.SSVMessageType(msg.MsgType) + + return mv.validateSSVMessage(msg, receivedAt) +} + +func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { + var descriptor Descriptor + + if len(ssvMessage.Data) == 0 { + return nil, descriptor, ErrEmptyData + } + + if len(ssvMessage.Data) > maxMessageSize { + err := ErrSSVDataTooBig + err.got = len(ssvMessage.Data) + err.want = maxMessageSize + return nil, descriptor, err + } + + if !bytes.Equal(ssvMessage.MsgID.GetDomain(), mv.netCfg.Domain[:]) { + err := ErrWrongDomain + err.got = hex.EncodeToString(ssvMessage.MsgID.GetDomain()) + err.want = hex.EncodeToString(mv.netCfg.Domain[:]) + return nil, descriptor, err + } + + validatorPK := ssvMessage.GetID().GetPubKey() + role := ssvMessage.GetID().GetRoleType() + descriptor.Role = role + descriptor.ValidatorPK = validatorPK + + if !mv.validRole(role) { + return nil, descriptor, ErrInvalidRole + } + + publicKey, err := ssvtypes.DeserializeBLSPublicKey(validatorPK) + if err != nil { + e := ErrDeserializePublicKey + e.innerErr = err + return nil, descriptor, e + } + + var share *ssvtypes.SSVShare + if mv.shareStorage != nil { + share = mv.shareStorage.Get(nil, publicKey.Serialize()) + if share == nil { + e := ErrUnknownValidator + e.got = publicKey.SerializeToHexStr() + return nil, descriptor, e + } + + if share.Liquidated { + return nil, descriptor, ErrValidatorLiquidated + } + + if share.BeaconMetadata == nil { + return nil, descriptor, ErrNoShareMetadata + } + + if !share.BeaconMetadata.IsAttesting() { + err := ErrValidatorNotAttesting + err.got = share.BeaconMetadata.Status.String() + return nil, descriptor, err + } + } + + msg, err := queue.DecodeSSVMessage(ssvMessage) + if err != nil { + if errors.Is(err, queue.ErrUnknownMessageType) { + e := ErrUnknownSSVMessageType + e.got = ssvMessage.GetType() + return nil, descriptor, e + } + + e := ErrMalformedMessage + e.innerErr = 
err + return nil, descriptor, e + } + + descriptor.SSVMessageType = ssvMessage.MsgType + + if mv.shareStorage != nil { + switch ssvMessage.MsgType { + case spectypes.SSVConsensusMsgType: + if len(msg.Data) > maxConsensusMsgSize { + e := ErrSSVDataTooBig + e.got = len(ssvMessage.Data) + e.want = maxConsensusMsgSize + return nil, descriptor, e + } + + consensusDescriptor, slot, err := mv.validateConsensusMessage(share, msg.Body.(*specqbft.SignedMessage), msg.GetID(), receivedAt) + descriptor.Consensus = &consensusDescriptor + descriptor.Slot = slot + if err != nil { + return nil, descriptor, err + } + + case spectypes.SSVPartialSignatureMsgType: + if len(msg.Data) > maxPartialSignatureMsgSize { + e := ErrSSVDataTooBig + e.got = len(ssvMessage.Data) + e.want = maxPartialSignatureMsgSize + return nil, descriptor, e + } + + slot, err := mv.validatePartialSignatureMessage(share, msg.Body.(*spectypes.SignedPartialSignatureMessage), msg.GetID()) + descriptor.Slot = slot + if err != nil { + return nil, descriptor, err + } + + case ssvmessage.SSVEventMsgType: + return nil, descriptor, ErrEventMessage + + case spectypes.DKGMsgType: + return nil, descriptor, ErrDKGMessage + } + } + + return msg, descriptor, nil +} + +func (mv *messageValidator) containsSignerFunc(signer spectypes.OperatorID) func(operator *spectypes.Operator) bool { + return func(operator *spectypes.Operator) bool { + return operator.OperatorID == signer + } +} + +func (mv *messageValidator) validateSignatureFormat(signature []byte) error { + if len(signature) != signatureSize { + e := ErrWrongSignatureSize + e.got = len(signature) + return e + } + + if [signatureSize]byte(signature) == [signatureSize]byte{} { + return ErrZeroSignature + } + return nil +} + +func (mv *messageValidator) commonSignerValidation(signer spectypes.OperatorID, share *ssvtypes.SSVShare) error { + if signer == 0 { + return ErrZeroSigner + } + + if !slices.ContainsFunc(share.Committee, mv.containsSignerFunc(signer)) { + return ErrSignerNotInCommittee + } + + return nil +} + +func (mv *messageValidator) validateSlotTime(messageSlot phase0.Slot, role spectypes.BeaconRole, receivedAt time.Time) error { + if mv.earlyMessage(messageSlot, receivedAt) { + return ErrEarlyMessage + } + + if lateness := mv.lateMessage(messageSlot, role, receivedAt); lateness > 0 { + e := ErrLateMessage + e.got = fmt.Sprintf("late by %v", lateness) + return e + } + + return nil +} + +func (mv *messageValidator) earlyMessage(slot phase0.Slot, receivedAt time.Time) bool { + return mv.netCfg.Beacon.GetSlotEndTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())). + Add(-clockErrorTolerance).Before(mv.netCfg.Beacon.GetSlotStartTime(slot)) +} + +func (mv *messageValidator) lateMessage(slot phase0.Slot, role spectypes.BeaconRole, receivedAt time.Time) time.Duration { + var ttl phase0.Slot + switch role { + case spectypes.BNRoleProposer, spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + ttl = 1 + lateSlotAllowance + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator: + ttl = 32 + lateSlotAllowance + case spectypes.BNRoleValidatorRegistration: + return 0 + } + + deadline := mv.netCfg.Beacon.GetSlotStartTime(slot + ttl). + Add(lateMessageMargin).Add(clockErrorTolerance) + + return mv.netCfg.Beacon.GetSlotStartTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())). 
+ Sub(deadline) +} + +func (mv *messageValidator) consensusState(messageID spectypes.MessageID) *ConsensusState { + id := ConsensusID{ + PubKey: phase0.BLSPubKey(messageID.GetPubKey()), + Role: messageID.GetRoleType(), + } + + if _, ok := mv.index.Load(id); !ok { + cs := &ConsensusState{ + Signers: hashmap.New[spectypes.OperatorID, *SignerState](), + } + mv.index.Store(id, cs) + } + + cs, _ := mv.index.Load(id) + return cs.(*ConsensusState) +} diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go new file mode 100644 index 0000000000..b307e05049 --- /dev/null +++ b/message/validation/validation_test.go @@ -0,0 +1,1774 @@ +package validation + +import ( + "bytes" + "encoding/hex" + "math" + "testing" + "time" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/herumi/bls-eth-go-binary/bls" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pspb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/stretchr/testify/require" + eth2types "github.com/wealdtech/go-eth2-types/v2" + "go.uber.org/zap/zaptest" + + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/storage" + beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" +) + +func Test_ValidateSSVMessage(t *testing.T) { + logger := zaptest.NewLogger(t) + db, err := kv.NewInMemory(logger, basedb.Options{}) + require.NoError(t, err) + + ns, err := storage.NewNodeStorage(logger, db) + require.NoError(t, err) + + const validatorIndex = 123 + + ks := spectestingutils.Testing4SharesSet() + share := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + Index: validatorIndex, + }, + Liquidated: false, + }, + } + require.NoError(t, ns.Shares().Save(nil, share)) + + netCfg := networkconfig.TestNetwork + + roleAttester := spectypes.BNRoleAttester + + // Message validation happy flow, messages are not ignored or rejected and there are no errors + t.Run("happy flow", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + }) + + // Make sure messages are incremented and throw an ignore message if more than 1 for a commit + t.Run("message counts", func(t *testing.T) { + validator 
:= NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + state := validator.consensusState(msgID) + for i := spectypes.OperatorID(1); i <= 4; i++ { + signerState := state.GetSignerState(i) + require.Nil(t, signerState) + } + + signedMsg := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedMsg, err := signedMsg.Encode() + require.NoError(t, err) + + ssvMsg := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMsg, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.NoError(t, err) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + state1 := state.GetSignerState(1) + require.NotNil(t, state1) + require.EqualValues(t, height, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Proposal: 1}, state1.MessageCounts) + for i := spectypes.OperatorID(2); i <= 4; i++ { + signerState := state.GetSignerState(i) + require.Nil(t, signerState) + } + + signedMsg = spectestingutils.TestingPrepareMessageWithParams(ks.Shares[1], 1, 2, height, spectestingutils.TestingIdentifier, spectestingutils.TestingQBFTRootData) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.NoError(t, err) + + require.NotNil(t, state1) + require.EqualValues(t, height, state1.Slot) + require.EqualValues(t, 2, state1.Round) + require.EqualValues(t, MessageCounts{Prepare: 1}, state1.MessageCounts) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + signedMsg = spectestingutils.TestingCommitMessageWithHeight(ks.Shares[1], 1, height+1) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.NoError(t, err) + require.NotNil(t, state1) + require.EqualValues(t, height+1, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Commit: 1}, state1.MessageCounts) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + signedMsg = spectestingutils.TestingCommitMultiSignerMessageWithHeight([]*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, height+1) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.NoError(t, err) + require.NotNil(t, state1) + require.EqualValues(t, height+1, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Commit: 1, Decided: 1}, state1.MessageCounts) + }) + + // Send a pubsub message with no data should cause an error + t.Run("pubsub message has no data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, 
WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + pmsg := &pubsub.Message{} + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err := validator.validateP2PMessage(pmsg, receivedAt) + + require.ErrorIs(t, err, ErrPubSubMessageHasNoData) + }) + + // Send a pubsub message where there is too much data should cause an error + t.Run("pubsub data too big", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + topic := commons.GetTopicFullName(commons.ValidatorTopicID(share.ValidatorPubKey)[0]) + pmsg := &pubsub.Message{ + Message: &pspb.Message{ + Data: bytes.Repeat([]byte{1}, 10_000_000), + Topic: &topic, + From: []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r"), + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) + + e := ErrPubSubDataTooBig + e.got = 10_000_000 + require.ErrorIs(t, err, e) + }) + + // Send a malformed pubsub message (empty message) should return an error + t.Run("empty pubsub message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + topic := commons.GetTopicFullName(commons.ValidatorTopicID(share.ValidatorPubKey)[0]) + pmsg := &pubsub.Message{ + Message: &pspb.Message{ + Data: []byte{1}, + Topic: &topic, + From: []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r"), + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) + + require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) + }) + + // Send a message with incorrect data (unable to decode incorrect message type) + t.Run("bad data format", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{1}, 500), + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Send a message with no data should return an error + t.Run("no data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: []byte{}, + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + require.ErrorIs(t, err, ErrEmptyData) + + message = &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: nil, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorIs(t, err, 
ErrEmptyData) + }) + + // Send a message where there is too much data should cause an error + t.Run("data too big", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + const tooBigMsgSize = maxMessageSize * 2 + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{0x1}, tooBigMsgSize), + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrSSVDataTooBig + expectedErr.got = tooBigMsgSize + expectedErr.want = maxMessageSize + require.ErrorIs(t, err, expectedErr) + }) + + // Send exact allowed data size amount but with invalid data (fails to decode) + t.Run("data size borderline / malformed message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{0x1}, maxMessageSize), + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Send an invalid SSV message type returns an error + t.Run("invalid SSV message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: math.MaxUint64, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: []byte{0x1}, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrUnknownSSVMessageType.Error()) + }) + + // Empty validator public key returns an error + t.Run("empty validator public key", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, spectypes.ValidatorPK{}, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrDeserializePublicKey.Error()) + }) + + // Generate random validator and validate it is unknown to the network + t.Run("unknown validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + sk, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, sk.PublicKey().Marshal(), roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrUnknownValidator + expectedErr.got = hex.EncodeToString(sk.PublicKey().Marshal()) + 
require.ErrorIs(t, err, expectedErr) + }) + + // Make sure messages are dropped if on the incorrect network + t.Run("wrong domain", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + wrongDomain := spectypes.DomainType{math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8} + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(wrongDomain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrWrongDomain + expectedErr.got = hex.EncodeToString(wrongDomain[:]) + expectedErr.want = hex.EncodeToString(netCfg.Domain[:]) + require.ErrorIs(t, err, expectedErr) + }) + + // Send message with a value that refers to a non-existent role + t.Run("invalid role", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, math.MaxUint64), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrInvalidRole) + }) + + // Perform validator registration with a consensus type message will give an error + t.Run("consensus validator registration", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, spectypes.BNRoleValidatorRegistration), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrConsensusValidatorRegistration) + }) + + // Ignore messages related to a validator that is liquidated + t.Run("liquidated validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + liquidatedSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + liquidatedShare := &ssvtypes.SSVShare{ + Share: 
*spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + }, + Liquidated: true, + }, + } + liquidatedShare.ValidatorPubKey = liquidatedSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, liquidatedShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, liquidatedShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrValidatorLiquidated + require.ErrorIs(t, err, expectedErr) + + require.NoError(t, ns.Shares().Delete(nil, liquidatedShare.ValidatorPubKey)) + }) + + // Ignore messages related to a validator that is not active + t.Run("inactive validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + inactiveSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + inactiveShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateUnknown, + }, + Liquidated: false, + }, + } + inactiveShare.ValidatorPubKey = inactiveSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, inactiveShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, inactiveShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrValidatorNotAttesting + expectedErr.got = eth2apiv1.ValidatorStateUnknown.String() + require.ErrorIs(t, err, expectedErr) + + require.NoError(t, ns.Shares().Delete(nil, inactiveShare.ValidatorPubKey)) + }) + + // Unable to process a message with a validator that is not on the network + t.Run("no share metadata", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + noMetadataSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + noMetadataShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: nil, + Liquidated: false, + }, + } + noMetadataShare.ValidatorPubKey = noMetadataSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, noMetadataShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, noMetadataShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + 
receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))
+
+		_, _, err = validator.validateSSVMessage(message, receivedAt)
+		require.ErrorIs(t, err, ErrNoShareMetadata)
+
+		require.NoError(t, ns.Shares().Delete(nil, noMetadataShare.ValidatorPubKey))
+	})
+
+	// Receive an error if there are more than 2 attestation duties in an epoch
+	t.Run("too many duties", func(t *testing.T) {
+		validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator)
+
+		slot := netCfg.Beacon.FirstSlotAtEpoch(1)
+		height := specqbft.Height(slot)
+
+		validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height)
+		encodedValidSignedMessage, err := validSignedMessage.Encode()
+		require.NoError(t, err)
+
+		message := &spectypes.SSVMessage{
+			MsgType: spectypes.SSVConsensusMsgType,
+			MsgID:   spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester),
+			Data:    encodedValidSignedMessage,
+		}
+
+		_, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)))
+		require.NoError(t, err)
+
+		validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+4)
+		encodedValidSignedMessage, err = validSignedMessage.Encode()
+		require.NoError(t, err)
+
+		message.Data = encodedValidSignedMessage
+		_, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+4).Add(validator.waitAfterSlotStart(roleAttester)))
+		require.NoError(t, err)
+
+		validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+8)
+		encodedValidSignedMessage, err = validSignedMessage.Encode()
+		require.NoError(t, err)
+
+		message.Data = encodedValidSignedMessage
+		_, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+8).Add(validator.waitAfterSlotStart(roleAttester)))
+		require.ErrorContains(t, err, ErrTooManyDutiesPerEpoch.Error())
+	})
+
+	// Return an error when receiving a proposer-role message while the beacon node reports no proposer duty for this validator
+	t.Run("no proposal duties", func(t *testing.T) {
+		const epoch = 1
+		slot := netCfg.Beacon.FirstSlotAtEpoch(epoch)
+		height := specqbft.Height(slot)
+
+		dutyStore := dutystore.New()
+		dutyStore.Proposer.Add(epoch, slot, validatorIndex+1, &eth2apiv1.ProposerDuty{}, true)
+		validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithDutyStore(dutyStore), WithSignatureVerification(true)).(*messageValidator)
+
+		validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height)
+		encodedValidSignedMessage, err := validSignedMessage.Encode()
+		require.NoError(t, err)
+
+		message := &spectypes.SSVMessage{
+			MsgType: spectypes.SSVConsensusMsgType,
+			MsgID:   spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, spectypes.BNRoleProposer),
+			Data:    encodedValidSignedMessage,
+		}
+
+		_, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)))
+		require.ErrorContains(t, err, ErrNoDuty.Error())
+
+		dutyStore = dutystore.New()
+		dutyStore.Proposer.Add(epoch, slot, validatorIndex, &eth2apiv1.ProposerDuty{}, true)
+		validator = NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithDutyStore(dutyStore), WithSignatureVerification(true)).(*messageValidator)
+		_, _, err = validator.validateSSVMessage(message,
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer))) + require.NoError(t, err) + }) + + // Get error when receiving a message with over 13 partial signatures + t.Run("partial message too big", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + for i := 0; i < 13; i++ { + msg.Message.Messages = append(msg.Message.Messages, msg.Message.Messages[0]) + } + + _, err := msg.Encode() + require.ErrorContains(t, err, "max expected 13 and 14 found") + }) + + // Get error when receiving message from operator who is not affiliated with the validator + t.Run("signer ID not in committee", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 5, specqbft.Height(slot)) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrSignerNotInCommittee) + }) + + // Get error when receiving message from operator who is non-existent (operator id 0) + t.Run("partial zero signer ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 0, specqbft.Height(slot)) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + // Get error when receiving partial signature message from operator who is the incorrect signer + t.Run("partial inconsistent signer ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages[0].Signer = 2 + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrUnexpectedSigner + expectedErr.got = spectypes.OperatorID(2) + expectedErr.want = spectypes.OperatorID(1) + require.ErrorIs(t, err, expectedErr) + }) + + // Receive error when receiving a duplicated partial signature message + t.Run("partial duplicated message", func(t 
*testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages = append(msg.Message.Messages, msg.Message.Messages[0]) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrDuplicatedPartialSignatureMessage) + }) + + // Receive error when "partialSignatureMessages" does not contain any "partialSignatureMessage" + t.Run("no partial signature messages", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages = []*spectypes.PartialSignatureMessage{} + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrNoPartialMessages) + }) + + // Receive error when the partial signature message is not enough bytes + t.Run("partial wrong signature size", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Signature = []byte{1} + + encoded, err := msg.Encode() + require.ErrorContains(t, err, "bytes array does not have the correct length") + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Get error when receiving a partial signature message with an invalid signature + t.Run("partial wrong signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Signature = bytes.Repeat([]byte{1}, 96) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrInvalidSignature.Error()) + }) + + // Run partial message type validation tests + t.Run("partial message type validation", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(162304) + + // Check happy flow of a duty for each role + t.Run("valid", func(t *testing.T) { + tests := map[spectypes.BeaconRole][]spectypes.PartialSigMsgType{ + spectypes.BNRoleAttester: {spectypes.PostConsensusPartialSig}, + spectypes.BNRoleAggregator: {spectypes.PostConsensusPartialSig, spectypes.SelectionProofPartialSig}, + spectypes.BNRoleProposer: {spectypes.PostConsensusPartialSig, spectypes.RandaoPartialSig}, + spectypes.BNRoleSyncCommittee: {spectypes.PostConsensusPartialSig}, + spectypes.BNRoleSyncCommitteeContribution: {spectypes.PostConsensusPartialSig, spectypes.ContributionProofs}, + spectypes.BNRoleValidatorRegistration: {spectypes.ValidatorRegistrationPartialSig}, + } + + for role, msgTypes := range tests { + for _, msgType := range msgTypes { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + innerSig, r, err := spectestingutils.NewTestingKeyManager().SignBeaconObject(spectypes.SSZUint64(spectestingutils.TestingDutyEpoch), phase0.Domain{}, ks.Shares[1].GetPublicKey().Serialize(), phase0.DomainType{}) + require.NoError(t, err) + + innerMsg := spectypes.PartialSignatureMessages{ + Type: msgType, + Messages: []*spectypes.PartialSignatureMessage{ + { + PartialSignature: innerSig, + SigningRoot: r, + Signer: 1, + }, + }, + } + + sig, err := spectestingutils.NewTestingKeyManager().SignRoot(innerMsg, spectypes.PartialSignatureType, ks.Shares[1].GetPublicKey().Serialize()) + require.NoError(t, err) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: innerMsg, + Signature: sig, + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + } + } + }) + + // Get error when receiving a message with an incorrect message type + t.Run("invalid message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: spectypes.PartialSignatureMessages{ + Type: math.MaxUint64, + }, + Signature: make([]byte, 96), + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrUnknownPartialMessageType.Error()) + }) + + // Get error when sending an unexpected message type for the required duty (sending randao for attestor duty) + t.Run("mismatch", func(t *testing.T) { + tests := map[spectypes.BeaconRole][]spectypes.PartialSigMsgType{ + spectypes.BNRoleAttester: 
{spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleAggregator: {spectypes.RandaoPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleProposer: {spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleSyncCommittee: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleSyncCommitteeContribution: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleValidatorRegistration: {spectypes.PostConsensusPartialSig, spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs}, + } + + for role, msgTypes := range tests { + for _, msgType := range msgTypes { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: spectypes.PartialSignatureMessages{ + Type: msgType, + }, + Signature: make([]byte, 96), + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrPartialSignatureTypeRoleMismatch.Error()) + } + } + }) + }) + + // Get error when receiving QBFT message with an invalid type + t.Run("invalid QBFT message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + msg := &specqbft.Message{ + MsgType: math.MaxUint64, + Height: height, + Round: specqbft.FirstRound, + Identifier: spectestingutils.TestingIdentifier, + Root: spectestingutils.TestingQBFTRootData, + } + signedMsg := spectestingutils.SignQBFTMsg(ks.Shares[1], 1, msg) + + encodedValidSignedMessage, err := signedMsg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrUnknownQBFTMessageType + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving an incorrect signature size (too small) + t.Run("wrong signature size", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Signature = []byte{0x1} + + _, err := validSignedMessage.Encode() + require.Error(t, err) + }) + + // Initialize signature tests + t.Run("zero signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := 
netCfg.Beacon.FirstSlotAtEpoch(1)
+		height := specqbft.Height(slot)
+
+		// Get error when receiving a consensus message with a zero signature
+		t.Run("consensus message", func(t *testing.T) {
+			validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height)
+			zeroSignature := [signatureSize]byte{}
+			validSignedMessage.Signature = zeroSignature[:]
+
+			encoded, err := validSignedMessage.Encode()
+			require.NoError(t, err)
+
+			message := &spectypes.SSVMessage{
+				MsgType: spectypes.SSVConsensusMsgType,
+				MsgID:   spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester),
+				Data:    encoded,
+			}
+
+			receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))
+			_, _, err = validator.validateSSVMessage(message, receivedAt)
+			require.ErrorIs(t, err, ErrZeroSignature)
+		})
+
+		// Get error when receiving a partial signature message with a zero signature
+		t.Run("partial signature message", func(t *testing.T) {
+			partialSigMessage := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, height)
+			zeroSignature := [signatureSize]byte{}
+			partialSigMessage.Signature = zeroSignature[:]
+
+			encoded, err := partialSigMessage.Encode()
+			require.NoError(t, err)
+
+			ssvMessage := &spectypes.SSVMessage{
+				MsgType: spectypes.SSVPartialSignatureMsgType,
+				MsgID:   spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester),
+				Data:    encoded,
+			}
+
+			receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))
+			_, _, err = validator.validateSSVMessage(ssvMessage, receivedAt)
+			require.ErrorIs(t, err, ErrZeroSignature)
+		})
+	})
+
+	// Get error when receiving a message with an empty list of signers
+	t.Run("no signers", func(t *testing.T) {
+		validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator)
+
+		slot := netCfg.Beacon.FirstSlotAtEpoch(1)
+		height := specqbft.Height(slot)
+
+		validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height)
+		validSignedMessage.Signers = []spectypes.OperatorID{}
+
+		encoded, err := validSignedMessage.Encode()
+		require.NoError(t, err)
+
+		message := &spectypes.SSVMessage{
+			MsgType: spectypes.SSVConsensusMsgType,
+			MsgID:   spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester),
+			Data:    encoded,
+		}
+
+		receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))
+		_, _, err = validator.validateSSVMessage(message, receivedAt)
+		require.ErrorIs(t, err, ErrNoSigners)
+	})
+
+	// Initialize zero signer tests
+	t.Run("zero signer", func(t *testing.T) {
+		validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator)
+
+		slot := netCfg.Beacon.FirstSlotAtEpoch(1)
+
+		inactiveSK, err := eth2types.GenerateBLSPrivateKey()
+		require.NoError(t, err)
+
+		zeroSignerKS := spectestingutils.Testing7SharesSet()
+		zeroSignerShare := &ssvtypes.SSVShare{
+			Share: *spectestingutils.TestingShare(zeroSignerKS),
+			Metadata: ssvtypes.Metadata{
+				BeaconMetadata: &beaconprotocol.ValidatorMetadata{
+					Status: eth2apiv1.ValidatorStateActiveOngoing,
+				},
+				Liquidated: false,
+			},
+		}
+		zeroSignerShare.Committee[0].OperatorID = 0
+		zeroSignerShare.ValidatorPubKey = inactiveSK.PublicKey().Marshal()
+
+		require.NoError(t, ns.Shares().Save(nil, zeroSignerShare))
+
+		// Get error when receiving a consensus message with a zero signer
+		t.Run("consensus message", func(t
*testing.T) { + validSignedMessage := spectestingutils.TestingProposalMessage(zeroSignerKS.Shares[1], 1) + validSignedMessage.Signers = []spectypes.OperatorID{0} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, zeroSignerShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + // Get error when receiving a partial message with a zero signer + t.Run("partial signature message", func(t *testing.T) { + partialSignatureMessage := spectestingutils.PostConsensusAttestationMsg(zeroSignerKS.Shares[1], 1, specqbft.Height(slot)) + partialSignatureMessage.Signer = 0 + + encoded, err := partialSignatureMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, zeroSignerShare.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + require.NoError(t, ns.Shares().Delete(nil, zeroSignerShare.ValidatorPubKey)) + }) + + // Get error when receiving a message with duplicated signers + t.Run("non unique signer", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{1, 2, 2} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrDuplicatedSigner) + }) + + // Get error when receiving a message with non-sorted signers + t.Run("signers not sorted", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{3, 2, 1} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrSignersNotSorted) + }) + + // Get 
error when receiving message from non quorum size amount of signers + t.Run("wrong signers length", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{1, 2} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrWrongSignersLength + expectedErr.got = 2 + expectedErr.want = "between 3 and 4" + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving a non decided message with multiple signers + t.Run("non decided with multiple signers", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingMultiSignerProposalMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrNonDecidedWithMultipleSigners + expectedErr.got = 3 + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving a proposal message with an invalid signature (random bytes) + t.Run("wrong signed signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + validSignedMessage.Signature = bytes.Repeat([]byte{1}, 96) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrInvalidSignature.Error()) + }) + + // Send late message for all roles and receive late message error + t.Run("late message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := 
validSignedMessage.Encode() + require.NoError(t, err) + + tests := map[spectypes.BeaconRole]time.Time{ + spectypes.BNRoleAttester: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAttester)), + spectypes.BNRoleAggregator: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAggregator)), + spectypes.BNRoleProposer: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), + spectypes.BNRoleSyncCommittee: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommittee)), + spectypes.BNRoleSyncCommitteeContribution: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommitteeContribution)), + } + + for role, receivedAt := range tests { + role, receivedAt := role, receivedAt + t.Run(role.String(), func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrLateMessage.Error()) + }) + } + }) + + // Send early message for all roles before the duty start and receive early message error + t.Run("early message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot - 1) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrEarlyMessage) + }) + + // Send message from non-leader acting as a leader should receive an error + t.Run("not a leader", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[2], 2, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrSignerNotLeader + expectedErr.got = spectypes.OperatorID(2) + expectedErr.want = spectypes.OperatorID(1) + require.ErrorIs(t, err, expectedErr) + }) + + // Send wrong size of data (8 bytes) for a prepare justification message should receive an error + t.Run("malformed prepare justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := 
netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Message.PrepareJustification = [][]byte{{1}} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedPrepareJustifications.Error()) + }) + + // Send prepare justification message without a proposal message should receive an error + t.Run("non-proposal with prepare justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.TestingProposalMessageWithParams( + ks.Shares[1], spectypes.OperatorID(1), specqbft.FirstRound, specqbft.FirstHeight, spectestingutils.TestingQBFTRootData, + nil, + spectestingutils.MarshalJustifications([]*specqbft.SignedMessage{ + spectestingutils.TestingRoundChangeMessage(ks.Shares[1], spectypes.OperatorID(1)), + })) + msg.Message.MsgType = specqbft.PrepareMsgType + + encodedValidSignedMessage, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrUnexpectedPrepareJustifications + expectedErr.got = specqbft.PrepareMsgType + require.ErrorIs(t, err, expectedErr) + }) + + // Send round change justification message without a proposal message should receive an error + t.Run("non-proposal with round change justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.TestingProposalMessageWithParams( + ks.Shares[1], spectypes.OperatorID(1), specqbft.FirstRound, specqbft.FirstHeight, spectestingutils.TestingQBFTRootData, + spectestingutils.MarshalJustifications([]*specqbft.SignedMessage{ + spectestingutils.TestingPrepareMessage(ks.Shares[1], spectypes.OperatorID(1)), + }), + nil, + ) + msg.Message.MsgType = specqbft.PrepareMsgType + + encodedValidSignedMessage, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrUnexpectedRoundChangeJustifications + expectedErr.got = specqbft.PrepareMsgType + require.ErrorIs(t, err, expectedErr) + }) + + // Send round change justification message with a malformed message (1 byte) should receive an error + 
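+	// (Context: each justification entry is expected to hold an encoded
+	// *specqbft.SignedMessage (see spectestingutils.MarshalJustifications above),
+	// so a stray one-byte slice such as {1} cannot decode. A rough sketch of that
+	// decoding step, illustrative only:
+	//
+	//	for _, raw := range justifications {
+	//		sm := &specqbft.SignedMessage{}
+	//		if err := sm.Decode(raw); err != nil {
+	//			return ErrMalformedRoundChangeJustifications
+	//		}
+	//	}
+	// )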
t.Run("malformed round change justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Message.RoundChangeJustification = [][]byte{{1}} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedRoundChangeJustifications.Error()) + }) + + // Send message root hash that doesnt match the expected root hash should receive an error + t.Run("wrong root hash", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.FullData = []byte{1} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrInvalidHash + require.ErrorIs(t, err, expectedErr) + }) + + // Receive proposal from same operator twice with different messages (same round) should receive an error + t.Run("double proposal with different data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) + signed2.FullData = []byte{1} + signed2.Message.Root, err = specqbft.HashDataRoot(signed2.FullData) + require.NoError(t, err) + + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrDuplicatedProposalWithDifferentData + require.ErrorIs(t, err, expectedErr) + }) + + // 
Receive prepare from same operator twice with different messages (same round) should receive an error + t.Run("double prepare", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) + require.NoError(t, err) + + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "prepare, having pre-consensus: 0, proposal: 0, prepare: 1, commit: 0, decided: 0, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive commit from same operator twice with different messages (same round) should receive an error + t.Run("double commit", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 1, decided: 0, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive round change from same operator twice with different messages (same round) should receive an error + t.Run("double round change", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: 
spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 0, round change: 1, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive too many decided messages should receive an error + t.Run("too many decided", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + + signed := spectestingutils.TestingCommitMultiSignerMessageWithRound( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, 1) + encodedSigned, err := signed.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedSigned, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + for i := 0; i < maxDecidedCount(len(share.Committee)); i++ { + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + } + + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "decided, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 8, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive message from a round that is too high for that epoch should receive an error + t.Run("round too high", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + tests := map[spectypes.BeaconRole]specqbft.Round{ + spectypes.BNRoleAttester: 13, + spectypes.BNRoleAggregator: 13, + spectypes.BNRoleProposer: 7, + spectypes.BNRoleSyncCommittee: 7, + spectypes.BNRoleSyncCommitteeContribution: 7, + } + + for role, round := range tests { + role, round := role, round + t.Run(role.String(), func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + signedMessage := spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, round) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(0).Add(validator.waitAfterSlotStart(role)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorContains(t, err, ErrRoundTooHigh.Error()) + }) + } + }) + + // Receive message from a round that is incorrect for current epoch should receive an error + t.Run("round 
already advanced", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signedMessage := spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 2) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.NoError(t, err) + + signedMessage = spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 1) + encodedMessage, err = signedMessage.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorContains(t, err, ErrRoundAlreadyAdvanced.Error()) + }) + + // Initialize tests for testing when sending a message with a slot before the current one + t.Run("slot already advanced", func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + // Send a consensus message with a slot before the current one should cause an error + t.Run("consensus message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + signedMessage := spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height+1) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester))) + require.NoError(t, err) + + signedMessage = spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height) + encodedMessage, err = signedMessage.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + + // Send a partial signature message with a slot before the current one should cause an error + t.Run("partial signature message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height+1) + message.Message.Slot = phase0.Slot(height) + 1 + sig, err := spectestingutils.NewTestingKeyManager().SignRoot(message.Message, spectypes.PartialSignatureType, ks.Shares[2].GetPublicKey().Serialize()) + require.NoError(t, err) + message.Signature = sig + + encodedMessage, err := message.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester))) + 
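+			// The message above targets slot+1 and should be accepted; the message
+			// below reuses the earlier slot for the same signer and must now be
+			// rejected, since the signer has already advanced to a later slot.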
require.NoError(t, err) + + message = spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height) + message.Message.Slot = phase0.Slot(height) + sig, err = spectestingutils.NewTestingKeyManager().SignRoot(message.Message, spectypes.PartialSignatureType, ks.Shares[2].GetPublicKey().Serialize()) + require.NoError(t, err) + message.Signature = sig + + encodedMessage, err = message.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + }) + + // Receive an event message from an operator that is not myself should receive an error + t.Run("event message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + eventMsg := &ssvtypes.EventMsg{} + encoded, err := eventMsg.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: ssvmessage.SSVEventMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorIs(t, err, ErrEventMessage) + }) +} diff --git a/migrations/migration_2_encrypt_shares.go b/migrations/migration_2_encrypt_shares.go index 4ca0eb62c7..03c40a301d 100644 --- a/migrations/migration_2_encrypt_shares.go +++ b/migrations/migration_2_encrypt_shares.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "crypto/x509" "fmt" + "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/storage/basedb" diff --git a/monitoring/grafana/dashboard_msg_validation.json b/monitoring/grafana/dashboard_msg_validation.json new file mode 100644 index 0000000000..8ea0bd8f08 --- /dev/null +++ b/monitoring/grafana/dashboard_msg_validation.json @@ -0,0 +1,2175 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 144, + "iteration": 1695134055974, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 12, + "interval": "5m", + "options": { + "legend": { + "calcs": 
[], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Total", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Ignored", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Rejected", + "refId": "C" + } + ], + "title": "Message RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "#F2495C", + "mode": "palette-classic", + "seriesBy": "last" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 0, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 3, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 3, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval]))\n/\nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Rejected", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval]))\n/\nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval]))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Ignored", + "refId": "B" + } + ], + "title": "Ignore/Reject Rate", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + 
"fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 20, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval])) by (role) / sum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (role)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Ignored by Role", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 22, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval])) by (role) / sum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (role)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Rejected by Role", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": 
"bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 23, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval])) by (round) \n/ \nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (round)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Ignored by Round", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 24, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval])) by (round) \n/ \nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (round)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Rejected by Round", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "duplicated proposal with different data", + "late message", + "message round is too far from estimated", + "no duty for this epoch", + "round is too high for this role", + "signer has already advanced to a later slot", + "too many messages of same type per round", + "unknown validator", + "validator is not attesting" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 4, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right", + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation{instance=~\"$instance.*\", reason!=\"\"}[$__interval])) by (reason)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{reason}}", + "refId": "A" + } + ], + "title": "Validation Failure Reason", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 5, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_ssv_type{instance=~\"$instance.*\", type!=\"\"}[$__interval])) by (type)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "Messages by SSV type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 6, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type!=\"\"}[$__interval])) by (type)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "Messages by QBFT type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 48 + }, + "id": 7, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type=\"commit\", signers=\"1\"}[$__interval])) by (signers)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "Commit", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type=\"commit\", signers!=\"1\"}[$__interval])) by (signers))", + "hide": false, + "interval": "", + "legendFormat": "Decided", + "refId": "B" + } + ], + "title": "Commit messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + 
"lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 56 + }, + "id": 19, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_in_committee{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "in committee", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "rate(ssv_message_non_committee{instance=~\"$instance.*\", decided=\"decided\"}[$__interval])", + "hide": false, + "interval": "", + "legendFormat": "non-committee decided", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_non_committee{instance=~\"$instance.*\", decided=\"non-decided\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "non-committee non-decided", + "refId": "C" + } + ], + "title": "Committee belonging RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 64 + }, + "id": 9, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { 
+ "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message size (bytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "Over panel interval", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 72 + }, + "id": 13, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_sum{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Bytes", + "refId": "A" + } + ], + "title": "Total bytes received RPS (incoming messages)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 80 + }, + "id": 14, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": 
"sum(rate(ssv_message_validation_duration_seconds_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_message_validation_duration_seconds_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message validation duration (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 88 + }, + "id": 15, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_signature_validation_duration_seconds_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_signature_validation_duration_seconds_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, 
rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Signature validation duration (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 96 + }, + "id": 17, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_incoming{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Incoming, RPS", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_outgoing{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Outgoing, RPS", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_drops{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Dropped, RPS", + "refId": "C" + } + ], + "title": "Queue message RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 104 + }, + "id": 18, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "ssv_message_queue_size{instance=~\"$instance.*\"}", + "hide": false, + "interval": "", + "legendFormat": "Size", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "ssv_message_queue_capacity{instance=~\"$instance.*\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Capacity", + "refId": "G" + } + ], + "title": "Queue size/capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 112 + }, + "id": 16, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_time_in_queue_seconds_sum{instance=~\"$instance.*\"}[$__interval])) by (instance)\n/\nsum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval])) by (instance)\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + 
"interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message time in queue (seconds)", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 34, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "ssv-node-v2-4", + "value": "ssv-node-v2-4" + }, + "hide": 1, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [ + { + "selected": false, + "text": "ssv-node-v2-1", + "value": "ssv-node-v2-1" + }, + { + "selected": false, + "text": "ssv-node-v2-2", + "value": "ssv-node-v2-2" + }, + { + "selected": false, + "text": "ssv-node-v2-3", + "value": "ssv-node-v2-3" + }, + { + "selected": true, + "text": "ssv-node-v2-4", + "value": "ssv-node-v2-4" + }, + { + "selected": false, + "text": "ssv-node-v2-5", + "value": "ssv-node-v2-5" + }, + { + "selected": false, + "text": "ssv-node-v2-6", + "value": "ssv-node-v2-6" + }, + { + "selected": false, + "text": "ssv-node-v2-7", + "value": "ssv-node-v2-7" + }, + { + "selected": false, + "text": "ssv-node-v2-8", + "value": "ssv-node-v2-8" + } + ], + "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Message Validation", + "uid": "DppaYPgSk", + "version": 42, + "weekStart": "" +} \ No newline at end of file diff --git a/monitoring/grafana/dashboard_ssv_node.json b/monitoring/grafana/dashboard_ssv_node.json index d5568f9de2..47150acf91 100644 --- a/monitoring/grafana/dashboard_ssv_node.json +++ b/monitoring/grafana/dashboard_ssv_node.json @@ -22,7 +22,7 @@ "fiscalYearStartMonth": 0, "graphTooltip": 0, "id": 115, - "iteration": 1676023992743, + "iteration": 1696933836051, "links": [], "liveNow": false, "panels": [ @@ -2685,8 +2685,245 @@ "title": "Stream Protocols (time-series)", "transformations": [], "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 81 + }, + "id": 67, + "panels": [], + "title": "Vitals", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "Rate per second of requests, responses and active requests.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive are outgoing requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": 
"", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#73BF69", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "10ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "20ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "100ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5000ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 82 + }, + "id": 65, + "maxDataPoints": 25, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.3.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5ms", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"10.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "10ms", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"20.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"10.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "20ms", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"100.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"20.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "100ms", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"500.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"100.0\"}[5m]))", + 
"format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "500ms", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5000.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"500.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5000ms", + "refId": "F" + } + ], + "title": "Duty Execution Latency (5m)", + "transformations": [], + "type": "timeseries" } ], + "refresh": "", "schemaVersion": 34, "style": "dark", "tags": [], @@ -2695,8 +2932,8 @@ { "current": { "selected": false, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" + "text": "ssv-node-v2-6", + "value": "ssv-node-v2-6" }, "description": "", "hide": 1, @@ -2730,7 +2967,7 @@ "value": "ssv-node-v2-5" }, { - "selected": false, + "selected": true, "text": "ssv-node-v2-6", "value": "ssv-node-v2-6" }, @@ -2744,26 +2981,6 @@ "text": "ssv-node-v2-8", "value": "ssv-node-v2-8" }, - { - "selected": false, - "text": "ssv-node-9", - "value": "ssv-node-9" - }, - { - "selected": false, - "text": "ssv-node-10", - "value": "ssv-node-10" - }, - { - "selected": false, - "text": "ssv-node-11", - "value": "ssv-node-11" - }, - { - "selected": false, - "text": "ssv-node-12", - "value": "ssv-node-12" - }, { "selected": false, "text": "ssv-exporter", @@ -2773,29 +2990,9 @@ "selected": false, "text": "ssv-exporter-v2", "value": "ssv-exporter-v2" - }, - { - "selected": true, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" - }, - { - "selected": false, - "text": "ssv-node-v3-2", - "value": "ssv-node-v3-2" - }, - { - "selected": false, - "text": "ssv-node-v3-3", - "value": "ssv-node-v3-3" - }, - { - "selected": false, - "text": "ssv-node-v3-4", - "value": "ssv-node-v3-4" } ], - "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-node-9,ssv-node-10,ssv-node-11,ssv-node-12,ssv-exporter,ssv-exporter-v2,ssv-node-v3-1,ssv-node-v3-2,ssv-node-v3-3,ssv-node-v3-4", + "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-exporter,ssv-exporter-v2", "queryValue": "", "skipUrlSync": false, "type": "custom" @@ -2803,13 +3000,13 @@ ] }, "time": { - "from": "now-6h", + "from": "now-1h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Node Dashboard", "uid": "QNiMrdoVz", - "version": 59, + "version": 70, "weekStart": "" } \ No newline at end of file diff --git a/monitoring/grafana/dashboard_ssv_operator_performance.json b/monitoring/grafana/dashboard_ssv_operator_performance.json index 1ba7c2714f..ce769ee03d 100644 --- a/monitoring/grafana/dashboard_ssv_operator_performance.json +++ b/monitoring/grafana/dashboard_ssv_operator_performance.json @@ -229,7 +229,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -268,7 +268,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 
0)", "hide": false, "instant": true, "interval": "", @@ -2033,7 +2033,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -2072,7 +2072,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -3405,7 +3405,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -3444,7 +3444,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -4777,7 +4777,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -4816,7 +4816,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -5969,7 +5969,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -6008,7 +6008,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", "hide": false, "instant": true, "interval": "", diff --git a/monitoring/metricsreporter/metrics_reporter.go b/monitoring/metricsreporter/metrics_reporter.go index 859d46e518..01227e94c6 100644 --- a/monitoring/metricsreporter/metrics_reporter.go +++ b/monitoring/metricsreporter/metrics_reporter.go @@ -4,12 +4,16 @@ import ( "crypto/sha256" "fmt" "strconv" + "time" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/zap" + + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ) // TODO: implement all methods @@ -33,6 +37,10 @@ const ( validatorPending = float64(8) validatorRemoved = float64(9) validatorUnknown = float64(10) + + messageAccepted = 
"accepted" + messageIgnored = "ignored" + messageRejected = "rejected" ) var ( @@ -65,6 +73,70 @@ var ( Name: "ssv:exporter:operator_index", Help: "operator footprint", }, []string{"pubKey", "index"}) + messageValidationResult = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation", + Help: "Message validation result", + }, []string{"status", "reason", "role", "round"}) + messageValidationSSVType = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation_ssv_type", + Help: "SSV message type", + }, []string{"type"}) + messageValidationConsensusType = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation_consensus_type", + Help: "Consensus message type", + }, []string{"type", "signers"}) + messageValidationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_validation_duration_seconds", + Help: "Message validation duration (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.020, 0.050}, + }, []string{}) + signatureValidationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_signature_validation_duration_seconds", + Help: "Signature validation duration (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.020, 0.050}, + }, []string{}) + messageSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_size", + Help: "Message size", + Buckets: []float64{100, 500, 1_000, 5_000, 10_000, 50_000, 100_000, 500_000, 1_000_000, 5_000_000}, + }, []string{}) + activeMsgValidation = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv:p2p:pubsub:msg:val:active", + Help: "Count active message validation", + }, []string{"topic"}) + incomingQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_incoming", + Help: "The amount of message incoming to the validator's msg queue", + }, []string{"msg_id"}) + outgoingQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_outgoing", + Help: "The amount of message outgoing from the validator's msg queue", + }, []string{"msg_id"}) + droppedQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_drops", + Help: "The amount of message dropped from the validator's msg queue", + }, []string{"msg_id"}) + messageQueueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv_message_queue_size", + Help: "Size of message queue", + }, []string{}) + messageQueueCapacity = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv_message_queue_capacity", + Help: "Capacity of message queue", + }, []string{}) + messageTimeInQueue = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_time_in_queue_seconds", + Help: "Time message spent in queue (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.050, 0.100, 0.500, 1, 5, 10, 60}, + }, []string{"msg_id"}) + inCommitteeMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_in_committee", + Help: "The amount of messages in committee", + }, []string{"ssv_msg_type", "decided"}) + nonCommitteeMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_non_committee", + Help: "The amount of messages not in committee", + }, []string{"ssv_msg_type", "decided"}) ) type MetricsReporter struct { @@ -84,9 +156,26 @@ func New(opts ...Option) *MetricsReporter { allMetrics := []prometheus.Collector{ ssvNodeStatus, executionClientStatus, + executionClientLastFetchedBlock, validatorStatus, eventProcessed, 
eventProcessingFailed, + operatorIndex, + messageValidationResult, + messageValidationSSVType, + messageValidationConsensusType, + messageValidationDuration, + signatureValidationDuration, + messageSize, + activeMsgValidation, + incomingQueueMessages, + outgoingQueueMessages, + droppedQueueMessages, + messageQueueSize, + messageQueueCapacity, + messageTimeInQueue, + inCommitteeMessages, + nonCommitteeMessages, } for i, c := range allMetrics { @@ -102,77 +191,183 @@ func New(opts ...Option) *MetricsReporter { return &MetricsReporter{} } -func (m MetricsReporter) SSVNodeHealthy() { +func (m *MetricsReporter) SSVNodeHealthy() { ssvNodeStatus.Set(ssvNodeHealthy) } -func (m MetricsReporter) SSVNodeNotHealthy() { +func (m *MetricsReporter) SSVNodeNotHealthy() { ssvNodeStatus.Set(ssvNodeNotHealthy) } -func (m MetricsReporter) ExecutionClientReady() { +func (m *MetricsReporter) ExecutionClientReady() { executionClientStatus.Set(executionClientOK) } -func (m MetricsReporter) ExecutionClientSyncing() { +func (m *MetricsReporter) ExecutionClientSyncing() { executionClientStatus.Set(executionClientSyncing) } -func (m MetricsReporter) ExecutionClientFailure() { +func (m *MetricsReporter) ExecutionClientFailure() { executionClientStatus.Set(executionClientFailure) } -func (m MetricsReporter) ExecutionClientLastFetchedBlock(block uint64) { +func (m *MetricsReporter) ExecutionClientLastFetchedBlock(block uint64) { executionClientLastFetchedBlock.Set(float64(block)) } -func (m MetricsReporter) OperatorPublicKey(operatorID spectypes.OperatorID, publicKey []byte) { +func (m *MetricsReporter) OperatorPublicKey(operatorID spectypes.OperatorID, publicKey []byte) { pkHash := fmt.Sprintf("%x", sha256.Sum256(publicKey)) operatorIndex.WithLabelValues(pkHash, strconv.FormatUint(operatorID, 10)).Set(float64(operatorID)) } -func (m MetricsReporter) ValidatorInactive(publicKey []byte) { +func (m *MetricsReporter) ValidatorInactive(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorInactive) } -func (m MetricsReporter) ValidatorNoIndex(publicKey []byte) { +func (m *MetricsReporter) ValidatorNoIndex(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNoIndex) } -func (m MetricsReporter) ValidatorError(publicKey []byte) { +func (m *MetricsReporter) ValidatorError(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorError) } -func (m MetricsReporter) ValidatorReady(publicKey []byte) { +func (m *MetricsReporter) ValidatorReady(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorReady) } -func (m MetricsReporter) ValidatorNotActivated(publicKey []byte) { +func (m *MetricsReporter) ValidatorNotActivated(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNotActivated) } -func (m MetricsReporter) ValidatorExiting(publicKey []byte) { +func (m *MetricsReporter) ValidatorExiting(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorExiting) } -func (m MetricsReporter) ValidatorSlashed(publicKey []byte) { +func (m *MetricsReporter) ValidatorSlashed(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorSlashed) } -func (m MetricsReporter) ValidatorNotFound(publicKey []byte) { +func (m *MetricsReporter) ValidatorNotFound(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNotFound) } 
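
For orientation, the collectors registered in this file are exactly what the new Message Validation dashboard earlier in this patch queries: ssv_message_validation backs the accept/ignore/reject rate panels, and the duration histograms back the percentile panels. Below is a minimal sketch of a caller recording outcomes through the reporter; this is a hypothetical standalone program, the real validator wiring lives elsewhere in the codebase, and the reason strings are examples borrowed from the dashboard legend. The BNRole constants are assumed from ssv-spec.

package main

import (
	"time"

	specqbft "github.com/bloxapp/ssv-spec/qbft"
	spectypes "github.com/bloxapp/ssv-spec/types"

	"github.com/bloxapp/ssv/monitoring/metricsreporter"
)

func main() {
	m := metricsreporter.New()

	// Accepted messages carry an empty reason label.
	m.MessageAccepted(spectypes.BNRoleAttester, specqbft.Round(1))

	// Ignored/rejected messages are labeled with a reason, which feeds
	// the dashboard's "Validation Failure Reason" panel.
	m.MessageIgnored("late message", spectypes.BNRoleAttester, specqbft.Round(1))
	m.MessageRejected("unknown validator", spectypes.BNRoleProposer, specqbft.Round(2))

	// Latency and size observations land in the histograms behind the
	// duration and message-size panels.
	m.MessageValidationDuration(3 * time.Millisecond)
	m.MessageSize(512)
}
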
-func (m MetricsReporter) ValidatorPending(publicKey []byte) { +func (m *MetricsReporter) ValidatorPending(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorPending) } -func (m MetricsReporter) ValidatorRemoved(publicKey []byte) { +func (m *MetricsReporter) ValidatorRemoved(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorRemoved) } -func (m MetricsReporter) ValidatorUnknown(publicKey []byte) { +func (m *MetricsReporter) ValidatorUnknown(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorUnknown) } -func (m MetricsReporter) EventProcessed(eventName string) { +func (m *MetricsReporter) EventProcessed(eventName string) { eventProcessed.WithLabelValues(eventName).Inc() } -func (m MetricsReporter) EventProcessingFailed(eventName string) { +func (m *MetricsReporter) EventProcessingFailed(eventName string) { eventProcessingFailed.WithLabelValues(eventName).Inc() } // TODO implement -func (m MetricsReporter) LastBlockProcessed(uint64) {} -func (m MetricsReporter) LogsProcessingError(error) {} +func (m *MetricsReporter) LastBlockProcessed(uint64) {} +func (m *MetricsReporter) LogsProcessingError(error) {} + +func (m *MetricsReporter) MessageAccepted( + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageAccepted, + "", + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) MessageIgnored( + reason string, + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageIgnored, + reason, + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) MessageRejected( + reason string, + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageRejected, + reason, + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) SSVMessageType(msgType spectypes.MsgType) { + messageValidationSSVType.WithLabelValues(ssvmessage.MsgTypeToString(msgType)).Inc() +} + +func (m *MetricsReporter) ConsensusMsgType(msgType specqbft.MessageType, signers int) { + messageValidationConsensusType.WithLabelValues(ssvmessage.QBFTMsgTypeToString(msgType), strconv.Itoa(signers)).Inc() +} + +func (m *MetricsReporter) MessageValidationDuration(duration time.Duration, labels ...string) { + messageValidationDuration.WithLabelValues(labels...).Observe(duration.Seconds()) +} + +func (m *MetricsReporter) SignatureValidationDuration(duration time.Duration, labels ...string) { + signatureValidationDuration.WithLabelValues(labels...).Observe(duration.Seconds()) +} + +func (m *MetricsReporter) MessageSize(size int) { + messageSize.WithLabelValues().Observe(float64(size)) +} + +func (m *MetricsReporter) ActiveMsgValidation(topic string) { + activeMsgValidation.WithLabelValues(topic).Inc() +} + +func (m *MetricsReporter) ActiveMsgValidationDone(topic string) { + activeMsgValidation.WithLabelValues(topic).Dec() +} + +func (m *MetricsReporter) IncomingQueueMessage(messageID spectypes.MessageID) { + incomingQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) OutgoingQueueMessage(messageID spectypes.MessageID) { + outgoingQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) DroppedQueueMessage(messageID spectypes.MessageID) { + 
droppedQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) MessageQueueSize(size int) { + messageQueueSize.WithLabelValues().Set(float64(size)) +} + +func (m *MetricsReporter) MessageQueueCapacity(size int) { + messageQueueCapacity.WithLabelValues().Set(float64(size)) +} + +func (m *MetricsReporter) MessageTimeInQueue(messageID spectypes.MessageID, d time.Duration) { + messageTimeInQueue.WithLabelValues(messageID.String()).Observe(d.Seconds()) +} + +func (m *MetricsReporter) InCommitteeMessage(msgType spectypes.MsgType, decided bool) { + str := "non-decided" + if decided { + str = "decided" + } + inCommitteeMessages.WithLabelValues(ssvmessage.MsgTypeToString(msgType), str).Inc() +} + +func (m *MetricsReporter) NonCommitteeMessage(msgType spectypes.MsgType, decided bool) { + str := "non-decided" + if decided { + str = "decided" + } + nonCommitteeMessages.WithLabelValues(ssvmessage.MsgTypeToString(msgType), str).Inc() +} diff --git a/network/network.go b/network/network.go index 67af7476fb..f40678892c 100644 --- a/network/network.go +++ b/network/network.go @@ -1,19 +1,19 @@ package network import ( + "context" "io" "go.uber.org/zap" - spectypes "github.com/bloxapp/ssv-spec/types" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) // MessageRouter is accepting network messages and route them to the corresponding (internal) components type MessageRouter interface { // Route routes the given message, this function MUST NOT block - Route(logger *zap.Logger, message spectypes.SSVMessage) + Route(ctx context.Context, message *queue.DecodedSSVMessage) } // MessageRouting allows to register a MessageRouter diff --git a/network/p2p/config.go b/network/p2p/config.go index 77f1e599b5..935eaa4c2a 100644 --- a/network/p2p/config.go +++ b/network/p2p/config.go @@ -14,6 +14,8 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/monitoring/metricsreporter" "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/networkconfig" @@ -62,6 +64,10 @@ type Config struct { NodeStorage storage.Storage // Network defines a network configuration. Network networkconfig.NetworkConfig + // MessageValidator validates incoming messages. + MessageValidator validation.MessageValidator + // Metrics report metrics. 
+ Metrics *metricsreporter.MetricsReporter PubsubMsgCacheTTL time.Duration `yaml:"PubsubMsgCacheTTL" env:"PUBSUB_MSG_CACHE_TTL" env-description:"How long a message ID will be remembered as seen"` PubsubOutQueueSize int `yaml:"PubsubOutQueueSize" env:"PUBSUB_OUT_Q_SIZE" env-description:"The size that we assign to the outbound pubsub message queue"` diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 4f27098061..768d583042 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -7,24 +7,22 @@ import ( "time" "github.com/cornelk/hashmap" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/network/commons" - connmgrcore "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" libp2pdiscbackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" "github.com/bloxapp/ssv/network/peers" "github.com/bloxapp/ssv/network/peers/connections" "github.com/bloxapp/ssv/network/records" "github.com/bloxapp/ssv/network/streams" - "github.com/bloxapp/ssv/network/syncing" "github.com/bloxapp/ssv/network/topics" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/utils/async" @@ -56,14 +54,15 @@ type p2pNetwork struct { interfaceLogger *zap.Logger // struct logger to log in interface methods that do not accept a logger cfg *Config - host host.Host - streamCtrl streams.StreamController - idx peers.Index - disc discovery.Service - topicsCtrl topics.Controller - msgRouter network.MessageRouter - msgResolver topics.MsgPeersResolver - connHandler connections.ConnHandler + host host.Host + streamCtrl streams.StreamController + idx peers.Index + disc discovery.Service + topicsCtrl topics.Controller + msgRouter network.MessageRouter + msgResolver topics.MsgPeersResolver + msgValidator validation.MessageValidator + connHandler connections.ConnHandler state int32 @@ -72,7 +71,6 @@ type p2pNetwork struct { backoffConnector *libp2pdiscbackoff.BackoffConnector subnets []byte libConnManager connmgrcore.ConnManager - syncer syncing.Syncer nodeStorage operatorstorage.Storage operatorPKCache sync.Map } @@ -90,6 +88,7 @@ func New(logger *zap.Logger, cfg *Config) network.P2PNetwork { interfaceLogger: logger, cfg: cfg, msgRouter: cfg.Router, + msgValidator: cfg.MessageValidator, state: stateClosed, activeValidators: hashmap.New[string, validatorStatus](), nodeStorage: cfg.NodeStorage, @@ -171,11 +170,6 @@ func (n *p2pNetwork) Start(logger *zap.Logger) error { return err } - // Create & start ConcurrentSyncer. 
- syncer := syncing.NewConcurrent(n.ctx, syncing.New(n), 16, syncing.DefaultTimeouts, nil) - go syncer.Run(logger) - n.syncer = syncer - return nil } diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go index 708deb79d3..d88be4af21 100644 --- a/network/p2p/p2p_pubsub.go +++ b/network/p2p/p2p_pubsub.go @@ -1,6 +1,7 @@ package p2pv1 import ( + "context" "encoding/hex" "fmt" @@ -11,12 +12,12 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/records" - - "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) type validatorStatus int @@ -133,8 +134,8 @@ func (n *p2pNetwork) subscribe(logger *zap.Logger, pk spectypes.ValidatorPK) err } // handleIncomingMessages reads messages from the given channel and calls the router, note that this function blocks. -func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(topic string, msg *pubsub.Message) error { - return func(topic string, msg *pubsub.Message) error { +func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(ctx context.Context, topic string, msg *pubsub.Message) error { + return func(ctx context.Context, topic string, msg *pubsub.Message) error { if n.msgRouter == nil { logger.Debug("msg router is not configured") return nil @@ -143,26 +144,28 @@ func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(topic string, return nil } - var ssvMsg *spectypes.SSVMessage + var decodedMsg *queue.DecodedSSVMessage if msg.ValidatorData != nil { - m, ok := msg.ValidatorData.(spectypes.SSVMessage) + m, ok := msg.ValidatorData.(*queue.DecodedSSVMessage) if ok { - ssvMsg = &m + decodedMsg = m } } - if ssvMsg == nil { + if decodedMsg == nil { return errors.New("message was not decoded") } - p2pID := ssvMsg.GetID().String() + p2pID := decodedMsg.GetID().String() // logger.With( // zap.String("pubKey", hex.EncodeToString(ssvMsg.MsgID.GetPubKey())), // zap.String("role", ssvMsg.MsgID.GetRoleType().String()), // ).Debug("handlePubsubMessages") - metricsRouterIncoming.WithLabelValues(p2pID, message.MsgTypeToString(ssvMsg.MsgType)).Inc() - n.msgRouter.Route(logger, *ssvMsg) + metricsRouterIncoming.WithLabelValues(p2pID, message.MsgTypeToString(decodedMsg.MsgType)).Inc() + + n.msgRouter.Route(ctx, decodedMsg) + return nil } } diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index 8ffe70656b..10a0e7cbc3 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -276,14 +276,12 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error { } func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { - cfg := &topics.PububConfig{ - Host: n.host, - TraceLog: n.cfg.PubSubTrace, - MsgValidatorFactory: func(s string) topics.MsgValidatorFunc { - return topics.NewSSVMsgValidator() - }, - MsgHandler: n.handlePubsubMessages(logger), - ScoreIndex: n.idx, + cfg := &topics.PubSubConfig{ + Host: n.host, + TraceLog: n.cfg.PubSubTrace, + MsgValidator: n.msgValidator, + MsgHandler: n.handlePubsubMessages(logger), + ScoreIndex: n.idx, //Discovery: n.disc, OutboundQueueSize: n.cfg.PubsubOutQueueSize, ValidationQueueSize: n.cfg.PubsubValidationQueueSize, @@ -302,10 +300,12 @@ func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { go cfg.MsgIDHandler.Start() // run GC every 3 minutes to clear old messages async.RunEvery(n.ctx, time.Minute*3, 
midHandler.GC) - _, tc, err := topics.NewPubsub(n.ctx, logger, cfg) + + _, tc, err := topics.NewPubSub(n.ctx, logger, cfg) if err != nil { return errors.Wrap(err, "could not setup pubsub") } + n.topicsCtrl = tc logger.Debug("topics controller is ready") return nil diff --git a/network/p2p/p2p_sync.go b/network/p2p/p2p_sync.go index 6b810c7d41..74ac3a4e14 100644 --- a/network/p2p/p2p_sync.go +++ b/network/p2p/p2p_sync.go @@ -1,143 +1,25 @@ package p2pv1 import ( - "context" "encoding/hex" "fmt" "math/rand" "time" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/network/commons" - - "github.com/multiformats/go-multistream" - - "github.com/bloxapp/ssv-spec/qbft" - specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" libp2p_protocol "github.com/libp2p/go-libp2p/core/protocol" + "github.com/multiformats/go-multistream" "github.com/pkg/errors" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" ) -func (n *p2pNetwork) SyncHighestDecided(mid spectypes.MessageID) error { - return n.syncer.SyncHighestDecided(context.Background(), n.interfaceLogger, mid, func(msg spectypes.SSVMessage) { - n.msgRouter.Route(n.interfaceLogger, msg) - }) -} - -func (n *p2pNetwork) SyncDecidedByRange(mid spectypes.MessageID, from, to qbft.Height) { - if !n.cfg.FullNode { - return - } - // TODO: uncomment to fix syncing bug! - // if from < to { - // n.logger.Warn("failed to sync decided by range: from is greater than to", - // zap.String("pubkey", hex.EncodeToString(mid.GetPubKey())), - // zap.String("role", mid.GetRoleType().String()), - // zap.Uint64("from", uint64(from)), - // zap.Uint64("to", uint64(to))) - // return - // } - if to > from { - n.interfaceLogger.Warn("failed to sync decided by range: to is higher than from", - zap.Uint64("from", uint64(from)), - zap.Uint64("to", uint64(to))) - return - } - - // TODO: this is a temporary solution to prevent syncing already decided heights. - // Example: Say we received a decided at height 99, and right after we received a decided at height 100 - // before we could advance the controller's height. This would cause the controller to call SyncDecidedByRange. - // However, height 99 is already synced, so temporarily we reject such requests here. - // Note: This isn't ideal because sometimes you do want to sync gaps of 1. 
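// Editor's note: concretely, with the minGap guard below set to 2, a request
// spanning heights 99 to 100 (a gap of exactly 1) returns early by design,
// which is the height-99/height-100 race described in the comment above.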
- const minGap = 2 - if to-from < minGap { - return - } - - err := n.syncer.SyncDecidedByRange(context.Background(), n.interfaceLogger, mid, from, to, func(msg spectypes.SSVMessage) { - n.msgRouter.Route(n.interfaceLogger, msg) - }) - if err != nil { - n.interfaceLogger.Error("failed to sync decided by range", zap.Error(err)) - } -} - -// LastDecided fetches last decided from a random set of peers -func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]p2pprotocol.SyncResult, error) { - const ( - minPeers = 3 - waitTime = time.Second * 24 - ) - if !n.isReady() { - return nil, p2pprotocol.ErrNetworkIsNotReady - } - pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) - peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) - if err != nil { - return nil, errors.Wrap(err, "could not get subset of peers") - } - return n.makeSyncRequest(logger, peers, mid, pid, &message.SyncMessage{ - Params: &message.SyncParams{ - Identifier: mid, - }, - Protocol: message.LastDecidedType, - }) -} - -// GetHistory sync the given range from a set of peers that supports history for the given identifier -func (n *p2pNetwork) GetHistory(logger *zap.Logger, mid spectypes.MessageID, from, to specqbft.Height, targets ...string) ([]p2pprotocol.SyncResult, specqbft.Height, error) { - if from >= to { - return nil, 0, nil - } - - if !n.isReady() { - return nil, 0, p2pprotocol.ErrNetworkIsNotReady - } - protocolID, peerCount := commons.ProtocolID(p2pprotocol.DecidedHistoryProtocol) - peers := make([]peer.ID, 0) - for _, t := range targets { - p, err := peer.Decode(t) - if err != nil { - continue - } - peers = append(peers, p) - } - // if no peers were provided -> select a random set of peers - if len(peers) == 0 { - random, err := n.getSubsetOfPeers(logger, mid.GetPubKey(), peerCount, n.peersWithProtocolsFilter(protocolID)) - if err != nil { - return nil, 0, errors.Wrap(err, "could not get subset of peers") - } - peers = random - } - maxBatchRes := specqbft.Height(n.cfg.MaxBatchResponse) - - var results []p2pprotocol.SyncResult - var err error - currentEnd := to - if to-from > maxBatchRes { - currentEnd = from + maxBatchRes - } - results, err = n.makeSyncRequest(logger, peers, mid, protocolID, &message.SyncMessage{ - Params: &message.SyncParams{ - Height: []specqbft.Height{from, currentEnd}, - Identifier: mid, - }, - Protocol: message.DecidedHistoryType, - }) - if err != nil { - return results, 0, err - } - return results, currentEnd, nil -} - // RegisterHandlers registers the given handlers func (n *p2pNetwork) RegisterHandlers(logger *zap.Logger, handlers ...*p2pprotocol.SyncHandler) { m := make(map[libp2p_protocol.ID][]p2pprotocol.RequestHandler) @@ -274,6 +156,8 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp } // peersWithProtocolsFilter is used to accept peers that supports the given protocols +// +//nolint:unused func (n *p2pNetwork) peersWithProtocolsFilter(protocols ...libp2p_protocol.ID) func(peer.ID) bool { return func(id peer.ID) bool { supported, err := n.host.Network().Peerstore().SupportsProtocols(id, protocols...) 
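An aside on the router contract this patch migrates to: MessageRouter.Route
(network/network.go above) now takes a context.Context and an already-decoded
*queue.DecodedSSVMessage instead of a logger and a raw spectypes.SSVMessage.
A minimal conforming implementation, sketched on the assumption that the
interface is exactly as shown in that hunk (the counter mirrors the
dummyRouter used in the tests below):

package example

import (
	"context"
	"sync/atomic"

	"github.com/bloxapp/ssv/protocol/v2/ssv/queue"
)

// countingRouter satisfies the patched network.MessageRouter.
// Route MUST NOT block (per the interface docs), so it only
// increments a counter and returns.
type countingRouter struct {
	count uint64
}

func (r *countingRouter) Route(_ context.Context, _ *queue.DecodedSSVMessage) {
	atomic.AddUint64(&r.count, 1)
}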
diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index 9fc132d0ff..d2152c049e 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -9,6 +9,10 @@ import ( "time" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" @@ -18,8 +22,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/network" - protcolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/protocol/v2/types" + p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" ) func TestGetMaxPeers(t *testing.T) { @@ -118,7 +121,7 @@ func TestP2pNetwork_Stream(t *testing.T) { pk, err := hex.DecodeString(pkHex) require.NoError(t, err) - mid := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) + mid := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) rounds := []specqbft.Round{ 1, 1, 1, 1, 2, 2, @@ -140,7 +143,7 @@ func TestP2pNetwork_Stream(t *testing.T) { <-time.After(time.Second) node := ln.Nodes[0] - res, err := node.LastDecided(logger, mid) + res, err := node.(*p2pNetwork).LastDecided(logger, mid) require.NoError(t, err) select { case err := <-errors: @@ -205,9 +208,30 @@ func TestWaitSubsetOfPeers(t *testing.T) { } } +func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]p2pprotocol.SyncResult, error) { + const ( + minPeers = 3 + waitTime = time.Second * 24 + ) + if !n.isReady() { + return nil, p2pprotocol.ErrNetworkIsNotReady + } + pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) + peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) + if err != nil { + return nil, errors.Wrap(err, "could not get subset of peers") + } + return n.makeSyncRequest(logger, peers, mid, pid, &message.SyncMessage{ + Params: &message.SyncParams{ + Identifier: mid, + }, + Protocol: message.LastDecidedType, + }) +} + func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes.MessageID, height specqbft.Height, round specqbft.Round, counter *int64, errors chan<- error) { - node.RegisterHandlers(logger, &protcolp2p.SyncHandler{ - Protocol: protcolp2p.LastDecidedProtocol, + node.RegisterHandlers(logger, &p2pprotocol.SyncHandler{ + Protocol: p2pprotocol.LastDecidedProtocol, Handler: func(message *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { atomic.AddInt64(counter, 1) sm := specqbft.SignedMessage{ @@ -235,21 +259,23 @@ func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes. 
}) } -func createNetworkAndSubscribe(t *testing.T, ctx context.Context, n int, pks ...string) (*LocalNet, []*dummyRouter, error) { +func createNetworkAndSubscribe(t *testing.T, ctx context.Context, nodes int, pks ...string) (*LocalNet, []*dummyRouter, error) { logger := logging.TestLogger(t) - ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), n, n/2-1, false) + ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), nodes, nodes/2-1, false) if err != nil { return nil, nil, err } - if len(ln.Nodes) != n { - return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), n) + if len(ln.Nodes) != nodes { + return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), nodes) } logger.Debug("created local network") - routers := make([]*dummyRouter, n) + routers := make([]*dummyRouter, nodes) for i, node := range ln.Nodes { - routers[i] = &dummyRouter{i: i} + routers[i] = &dummyRouter{ + i: i, + } node.UseMessageRouter(routers[i]) } @@ -299,9 +325,8 @@ type dummyRouter struct { i int } -func (r *dummyRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { - c := atomic.AddUint64(&r.count, 1) - logger.Debug("got message", zap.Uint64("count", c)) +func (r *dummyRouter) Route(_ context.Context, _ *queue.DecodedSSVMessage) { + atomic.AddUint64(&r.count, 1) } func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { @@ -309,7 +334,7 @@ func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) signedMsg := &specqbft.SignedMessage{ Message: specqbft.Message{ MsgType: specqbft.CommitMsgType, diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go index bcfa9ad311..70e862aaa7 100644 --- a/network/p2p/test_utils.go +++ b/network/p2p/test_utils.go @@ -12,12 +12,14 @@ import ( "go.uber.org/zap" "golang.org/x/sync/errgroup" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" "github.com/bloxapp/ssv/network/peers" "github.com/bloxapp/ssv/network/peers/connections/mock" "github.com/bloxapp/ssv/network/testing" + "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/utils/format" "github.com/bloxapp/ssv/utils/rsaencryption" ) @@ -136,6 +138,7 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys MockGetPrivateKey: keys.OperatorKey, RegisteredOperatorPublicKeyPEMs: []string{}, } + cfg.MessageValidator = validation.NewMessageValidator(networkconfig.TestNetwork) p := New(logger, cfg) err = p.Setup(logger) diff --git a/network/syncing/concurrent.go b/network/syncing/concurrent.go deleted file mode 100644 index d3ddcd2ec1..0000000000 --- a/network/syncing/concurrent.go +++ /dev/null @@ -1,189 +0,0 @@ -package syncing - -import ( - "context" - "fmt" - "sync" - "time" - - "go.uber.org/zap" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" -) - -// Error describes an error that occurred during a syncing operation. -type Error struct { - Operation Operation - Err error -} - -func (e Error) Error() string { - return fmt.Sprintf("%s: %v", e.Operation, e.Err) -} - -// Timeouts is a set of timeouts for each syncing operation. 
-type Timeouts struct { - // SyncHighestDecided is the timeout for SyncHighestDecided. - // Leave zero to not timeout. - SyncHighestDecided time.Duration - - // SyncDecidedByRange is the timeout for SyncDecidedByRange. - // Leave zero to not timeout. - SyncDecidedByRange time.Duration -} - -var DefaultTimeouts = Timeouts{ - SyncHighestDecided: 12 * time.Second, - SyncDecidedByRange: 30 * time.Minute, -} - -// Operation is a syncing operation that has been queued for execution. -type Operation interface { - run(context.Context, *zap.Logger, Syncer) error - timeout(Timeouts) time.Duration -} - -type OperationSyncHighestDecided struct { - ID spectypes.MessageID - Handler MessageHandler -} - -func (o OperationSyncHighestDecided) run(ctx context.Context, logger *zap.Logger, s Syncer) error { - return s.SyncHighestDecided(ctx, logger, o.ID, o.Handler) -} - -func (o OperationSyncHighestDecided) timeout(t Timeouts) time.Duration { - return t.SyncHighestDecided -} - -func (o OperationSyncHighestDecided) String() string { - return fmt.Sprintf("SyncHighestDecided(%s)", o.ID) -} - -type OperationSyncDecidedByRange struct { - ID spectypes.MessageID - From specqbft.Height - To specqbft.Height - Handler MessageHandler -} - -func (o OperationSyncDecidedByRange) run(ctx context.Context, logger *zap.Logger, s Syncer) error { - return s.SyncDecidedByRange(ctx, logger, o.ID, o.From, o.To, o.Handler) -} - -func (o OperationSyncDecidedByRange) timeout(t Timeouts) time.Duration { - return t.SyncDecidedByRange -} - -func (o OperationSyncDecidedByRange) String() string { - return fmt.Sprintf("SyncDecidedByRange(%s, %d, %d)", o.ID, o.From, o.To) -} - -// ConcurrentSyncer is a Syncer that runs the given Syncer's methods concurrently. -type ConcurrentSyncer struct { - syncer Syncer - ctx context.Context - jobs chan Operation - errors chan<- Error - concurrency int - timeouts Timeouts -} - -// NewConcurrent returns a new Syncer that runs the given Syncer's methods concurrently. -// Unlike the standard syncer, syncing methods are non-blocking and return immediately without error. -// concurrency is the number of worker goroutines to spawn. -// errors is a channel to which any errors are sent. Pass nil to discard errors. -func NewConcurrent( - ctx context.Context, - syncer Syncer, - concurrency int, - timeouts Timeouts, - errors chan<- Error, -) *ConcurrentSyncer { - return &ConcurrentSyncer{ - syncer: syncer, - ctx: ctx, - // TODO: make the buffer size configurable or better-yet unbounded? - jobs: make(chan Operation, 128*1024), - errors: errors, - concurrency: concurrency, - timeouts: timeouts, - } -} - -// Run starts the worker goroutines and blocks until the context is done -// and any remaining jobs are finished. -func (s *ConcurrentSyncer) Run(logger *zap.Logger) { - // Spawn worker goroutines. - var wg sync.WaitGroup - for i := 0; i < s.concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for job := range s.jobs { - s.do(logger, job) - } - }() - } - - // Close the jobs channel when the context is done. - <-s.ctx.Done() - close(s.jobs) - - // Wait for workers to finish their current jobs. - wg.Wait() -} - -func (s *ConcurrentSyncer) do(logger *zap.Logger, job Operation) { - ctx, cancel := context.WithTimeout(s.ctx, job.timeout(s.timeouts)) - defer cancel() - err := job.run(ctx, logger, s.syncer) - if err != nil && s.errors != nil { - s.errors <- Error{ - Operation: job, - Err: err, - } - } -} - -// Queued returns the number of jobs that are queued but not yet started. 
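// Editor's note: the ConcurrentSyncer above is a bounded worker pool: jobs
// land on a buffered channel, a fixed number of goroutines drain it, and Run
// blocks until the context ends and the queued jobs finish. A minimal,
// self-contained sketch of the same shape (hypothetical names, not part of
// this patch):
//
//	func runPool(ctx context.Context, jobs chan func(), workers int) {
//		var wg sync.WaitGroup
//		for i := 0; i < workers; i++ {
//			wg.Add(1)
//			go func() {
//				defer wg.Done()
//				for job := range jobs { // keeps draining after close
//					job()
//				}
//			}()
//		}
//		<-ctx.Done() // context ended: stop accepting work
//		close(jobs)  // enqueuing after this would panic, the same
//		wg.Wait()    // caveat the original enqueue methods carry
//	}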
-func (s *ConcurrentSyncer) Queued() int { - return len(s.jobs) -} - -// Capacity returns the maximum number of jobs that can be queued. -// When Queued() == Capacity(), then the next call will block -// until a job is finished. -func (s *ConcurrentSyncer) Capacity() int { - return cap(s.jobs) -} - -func (s *ConcurrentSyncer) SyncHighestDecided( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - handler MessageHandler, -) error { - s.jobs <- OperationSyncHighestDecided{ - ID: id, - Handler: handler, - } - return nil -} - -func (s *ConcurrentSyncer) SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, -) error { - s.jobs <- OperationSyncDecidedByRange{ - ID: id, - From: from, - To: to, - Handler: handler, - } - return nil -} diff --git a/network/syncing/concurrent_test.go b/network/syncing/concurrent_test.go deleted file mode 100644 index ace426f6a2..0000000000 --- a/network/syncing/concurrent_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package syncing_test - -import ( - "context" - "fmt" - "runtime" - "testing" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/network/syncing" - "github.com/bloxapp/ssv/network/syncing/mocks" -) - -func TestConcurrentSyncer(t *testing.T) { - logger := logging.TestLogger(t) - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Test setup - syncer := mocks.NewMockSyncer(ctrl) - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - syncer.EXPECT().SyncHighestDecided(gomock.Any(), gomock.Any(), id, gomock.Any()).Return(nil) - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - syncer.EXPECT().SyncDecidedByRange(gomock.Any(), gomock.Any(), id, from, to, gomock.Any()).Return(nil) - require.NoError(t, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - - // Test error handling - syncer.EXPECT().SyncHighestDecided(gomock.Any(), gomock.Any(), id, gomock.Any()).Return(fmt.Errorf("test error")) - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Wait for the syncer to finish - cancel() - - // Verify errors. 
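// Editor's note: the select below also asserts ordering: the Error must
// arrive on the errors channel before Run returns and closes done; if done
// is readable first, the error was dropped and the test fails.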
- select { - case err := <-errors: - require.IsType(t, syncing.OperationSyncHighestDecided{}, err.Operation) - require.Equal(t, id, err.Operation.(syncing.OperationSyncHighestDecided).ID) - require.Equal(t, "test error", err.Err.Error()) - case <-done: - t.Fatal("error channel should have received an error") - } - <-done -} - -func TestConcurrentSyncerMemoryUsage(t *testing.T) { - logger := logging.TestLogger(t) - - for i := 0; i < 4; i++ { - var before runtime.MemStats - runtime.ReadMemStats(&before) - - // Test setup - syncer := &mockSyncer{} - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - for i := 0; i < 1024*128; i++ { - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - require.NoError(t, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - } - - // Wait for the syncer to finish - cancel() - <-done - - var after runtime.MemStats - runtime.ReadMemStats(&after) - t.Logf("Allocated: %.2f MB", float64(after.TotalAlloc-before.TotalAlloc)/1024/1024) - } -} - -func BenchmarkConcurrentSyncer(b *testing.B) { - logger := logging.BenchLogger(b) - - for i := 0; i < b.N; i++ { - // Test setup - syncer := &mockSyncer{} - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - for i := 0; i < 1024*128; i++ { - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - require.NoError(b, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - require.NoError(b, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - } - - // Wait for the syncer to finish - cancel() - <-done - } -} diff --git a/network/syncing/mocks/syncer.go b/network/syncing/mocks/syncer.go deleted file mode 100644 index 1aa3a3d55d..0000000000 --- a/network/syncing/mocks/syncer.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./syncer.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - qbft "github.com/bloxapp/ssv-spec/qbft" - types "github.com/bloxapp/ssv-spec/types" - syncing "github.com/bloxapp/ssv/network/syncing" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - gomock "github.com/golang/mock/gomock" - zap "go.uber.org/zap" -) - -// MockSyncer is a mock of Syncer interface. -type MockSyncer struct { - ctrl *gomock.Controller - recorder *MockSyncerMockRecorder -} - -// MockSyncerMockRecorder is the mock recorder for MockSyncer. -type MockSyncerMockRecorder struct { - mock *MockSyncer -} - -// NewMockSyncer creates a new mock instance. 
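// Editor's note: this file is generated mockgen output (see the go:generate
// directive at the top of syncer.go below); it is deleted together with the
// Syncer and Network interfaces it mocks.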
-func NewMockSyncer(ctrl *gomock.Controller) *MockSyncer { - mock := &MockSyncer{ctrl: ctrl} - mock.recorder = &MockSyncerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSyncer) EXPECT() *MockSyncerMockRecorder { - return m.recorder -} - -// SyncDecidedByRange mocks base method. -func (m *MockSyncer) SyncDecidedByRange(ctx context.Context, logger *zap.Logger, id types.MessageID, from, to qbft.Height, handler syncing.MessageHandler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncDecidedByRange", ctx, logger, id, from, to, handler) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncDecidedByRange indicates an expected call of SyncDecidedByRange. -func (mr *MockSyncerMockRecorder) SyncDecidedByRange(ctx, logger, id, from, to, handler interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncDecidedByRange", reflect.TypeOf((*MockSyncer)(nil).SyncDecidedByRange), ctx, logger, id, from, to, handler) -} - -// SyncHighestDecided mocks base method. -func (m *MockSyncer) SyncHighestDecided(ctx context.Context, logger *zap.Logger, id types.MessageID, handler syncing.MessageHandler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncHighestDecided", ctx, logger, id, handler) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncHighestDecided indicates an expected call of SyncHighestDecided. -func (mr *MockSyncerMockRecorder) SyncHighestDecided(ctx, logger, id, handler interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncHighestDecided", reflect.TypeOf((*MockSyncer)(nil).SyncHighestDecided), ctx, logger, id, handler) -} - -// MockNetwork is a mock of Network interface. -type MockNetwork struct { - ctrl *gomock.Controller - recorder *MockNetworkMockRecorder -} - -// MockNetworkMockRecorder is the mock recorder for MockNetwork. -type MockNetworkMockRecorder struct { - mock *MockNetwork -} - -// NewMockNetwork creates a new mock instance. -func NewMockNetwork(ctrl *gomock.Controller) *MockNetwork { - mock := &MockNetwork{ctrl: ctrl} - mock.recorder = &MockNetworkMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { - return m.recorder -} - -// GetHistory mocks base method. -func (m *MockNetwork) GetHistory(logger *zap.Logger, id types.MessageID, from, to qbft.Height, targets ...string) ([]protocolp2p.SyncResult, qbft.Height, error) { - m.ctrl.T.Helper() - varargs := []interface{}{logger, id, from, to} - for _, a := range targets { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetHistory", varargs...) - ret0, _ := ret[0].([]protocolp2p.SyncResult) - ret1, _ := ret[1].(qbft.Height) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetHistory indicates an expected call of GetHistory. -func (mr *MockNetworkMockRecorder) GetHistory(logger, id, from, to interface{}, targets ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{logger, id, from, to}, targets...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistory", reflect.TypeOf((*MockNetwork)(nil).GetHistory), varargs...) -} - -// LastDecided mocks base method. 
-func (m *MockNetwork) LastDecided(logger *zap.Logger, id types.MessageID) ([]protocolp2p.SyncResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastDecided", logger, id) - ret0, _ := ret[0].([]protocolp2p.SyncResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LastDecided indicates an expected call of LastDecided. -func (mr *MockNetworkMockRecorder) LastDecided(logger, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastDecided", reflect.TypeOf((*MockNetwork)(nil).LastDecided), logger, id) -} diff --git a/network/syncing/syncer.go b/network/syncing/syncer.go deleted file mode 100644 index db36a94028..0000000000 --- a/network/syncing/syncer.go +++ /dev/null @@ -1,207 +0,0 @@ -package syncing - -import ( - "context" - "time" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/logging/fields" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/utils/tasks" -) - -//go:generate mockgen -package=mocks -destination=./mocks/syncer.go -source=./syncer.go - -// MessageHandler reacts to a message received from Syncer. -type MessageHandler func(msg spectypes.SSVMessage) - -// Syncer handles the syncing of decided messages. -type Syncer interface { - SyncHighestDecided(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, handler MessageHandler) error - SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, - ) error -} - -// Network is a subset of protocolp2p.Syncer, required by Syncer to retrieve messages from peers. -type Network interface { - LastDecided(logger *zap.Logger, id spectypes.MessageID) ([]protocolp2p.SyncResult, error) - GetHistory( - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - targets ...string, - ) ([]protocolp2p.SyncResult, specqbft.Height, error) -} - -type syncer struct { - network Network -} - -// New returns a standard implementation of Syncer. 
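// Editor's note: Network above deliberately narrows protocolp2p.Syncer to the
// two calls the syncer needs (LastDecided and GetHistory), which is what keeps
// it small enough to mock, as MockNetwork earlier in this patch shows.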
-func New(network Network) Syncer { - return &syncer{ - network: network, - } -} - -func (s *syncer) SyncHighestDecided( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - handler MessageHandler, -) error { - if ctx.Err() != nil { - return ctx.Err() - } - - logger = logger.With( - zap.String("what", "SyncHighestDecided"), - fields.PubKey(id.GetPubKey()), - fields.Role(id.GetRoleType())) - - lastDecided, err := s.network.LastDecided(logger, id) - if err != nil { - logger.Debug("last decided sync failed", zap.Error(err)) - return errors.Wrap(err, "could not sync last decided") - } - if len(lastDecided) == 0 { - logger.Debug("no messages were synced") - return nil - } - - results := protocolp2p.SyncResults(lastDecided) - var maxHeight specqbft.Height - results.ForEachSignedMessage(func(m *specqbft.SignedMessage) (stop bool) { - if ctx.Err() != nil { - return true - } - if m.Message.Height > maxHeight { - maxHeight = m.Message.Height - } - raw, err := m.Encode() - if err != nil { - logger.Debug("could not encode signed message", zap.Error(err)) - return false - } - handler(spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: id, - Data: raw, - }) - return false - }) - logger.Debug("synced last decided", zap.Uint64("highest_height", uint64(maxHeight)), zap.Int("messages", len(lastDecided))) - return nil -} - -func (s *syncer) SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, -) error { - if ctx.Err() != nil { - return ctx.Err() - } - - logger = logger.With( - zap.String("what", "SyncDecidedByRange"), - fields.PubKey(id.GetPubKey()), - fields.Role(id.GetRoleType()), - zap.Uint64("from", uint64(from)), - zap.Uint64("to", uint64(to))) - logger.Debug("syncing decided by range") - - err := s.getDecidedByRange( - context.Background(), - logger, - id, - from, - to, - func(sm *specqbft.SignedMessage) error { - raw, err := sm.Encode() - if err != nil { - logger.Debug("could not encode signed message", zap.Error(err)) - return nil - } - handler(spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: id, - Data: raw, - }) - return nil - }, - ) - if err != nil { - logger.Debug("sync failed", zap.Error(err)) - } - return err -} - -// getDecidedByRange calls GetHistory in batches to retrieve all decided messages in the given range. 
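// Editor's note: the loop below advances a moving tail height until it covers
// [from, to], retrying each batch and skipping heights it has already seen.
// Its core shape, as a hedged sketch with a hypothetical fetch helper:
//
//	tail := from
//	for tail < to {
//		msgs, next, err := fetch(tail, to) // one bounded batch per call
//		if err != nil {
//			return err
//		}
//		process(msgs) // a visited-height set filters duplicates
//		tail = next
//	}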
-func (s *syncer) getDecidedByRange( - ctx context.Context, - logger *zap.Logger, - mid spectypes.MessageID, - from, to specqbft.Height, - handler func(*specqbft.SignedMessage) error, -) error { - const maxRetries = 2 - - var ( - visited = make(map[specqbft.Height]struct{}) - msgs []protocolp2p.SyncResult - ) - - tail := from - var err error - for tail < to { - if ctx.Err() != nil { - return ctx.Err() - } - err := tasks.RetryWithContext(ctx, func() error { - start := time.Now() - msgs, tail, err = s.network.GetHistory(logger, mid, tail, to) - if err != nil { - return err - } - handled := 0 - protocolp2p.SyncResults(msgs).ForEachSignedMessage(func(m *specqbft.SignedMessage) (stop bool) { - if ctx.Err() != nil { - return true - } - if _, ok := visited[m.Message.Height]; ok { - return false - } - if err := handler(m); err != nil { - logger.Warn("could not handle signed message") - } - handled++ - visited[m.Message.Height] = struct{}{} - return false - }) - logger.Debug("received and processed history batch", - zap.Int64("tail", int64(tail)), - fields.Duration(start), - zap.Int("results_count", len(msgs)), - fields.SyncResults(msgs), - zap.Int("handled", handled)) - return nil - }, maxRetries) - if err != nil { - return err - } - } - - return nil -} diff --git a/network/syncing/syncer_test.go b/network/syncing/syncer_test.go deleted file mode 100644 index e0f99c3fb4..0000000000 --- a/network/syncing/syncer_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package syncing_test - -import ( - "context" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/network/syncing" -) - -type mockSyncer struct{} - -func (m *mockSyncer) SyncHighestDecided(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, handler syncing.MessageHandler) error { - return nil -} - -func (m *mockSyncer) SyncDecidedByRange(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, from specqbft.Height, to specqbft.Height, handler syncing.MessageHandler) error { - return nil -} - -type mockMessageHandler struct { - calls int - handler syncing.MessageHandler -} - -func newMockMessageHandler() *mockMessageHandler { - m := &mockMessageHandler{} - m.handler = func(msg spectypes.SSVMessage) { - m.calls++ - } - return m -} diff --git a/network/topics/controller.go b/network/topics/controller.go index 3ac1dea7e6..bbc9e3f821 100644 --- a/network/topics/controller.go +++ b/network/topics/controller.go @@ -37,7 +37,11 @@ type Controller interface { } // PubsubMessageHandler handles incoming messages -type PubsubMessageHandler func(string, *pubsub.Message) error +type PubsubMessageHandler func(context.Context, string, *pubsub.Message) error + +type messageValidator interface { + ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult +} // topicsCtrl implements Controller type topicsCtrl struct { @@ -45,25 +49,31 @@ type topicsCtrl struct { logger *zap.Logger // struct logger to implement i.Closer ps *pubsub.PubSub // scoreParamsFactory is a function that helps to set scoring params on topics - scoreParamsFactory func(string) *pubsub.TopicScoreParams - msgValidatorFactory func(string) MsgValidatorFunc - msgHandler PubsubMessageHandler - subFilter SubFilter + scoreParamsFactory func(string) *pubsub.TopicScoreParams + msgValidator messageValidator + msgHandler PubsubMessageHandler + subFilter SubFilter container *topicsContainer } // NewTopicsController creates an instance of 
Controller -func NewTopicsController(ctx context.Context, logger *zap.Logger, msgHandler PubsubMessageHandler, - msgValidatorFactory func(string) MsgValidatorFunc, subFilter SubFilter, pubSub *pubsub.PubSub, - scoreParams func(string) *pubsub.TopicScoreParams) Controller { +func NewTopicsController( + ctx context.Context, + logger *zap.Logger, + msgHandler PubsubMessageHandler, + msgValidator messageValidator, + subFilter SubFilter, + pubSub *pubsub.PubSub, + scoreParams func(string) *pubsub.TopicScoreParams, +) Controller { ctrl := &topicsCtrl{ - ctx: ctx, - logger: logger, - ps: pubSub, - scoreParamsFactory: scoreParams, - msgValidatorFactory: msgValidatorFactory, - msgHandler: msgHandler, + ctx: ctx, + logger: logger, + ps: pubSub, + scoreParamsFactory: scoreParams, + msgValidator: msgValidator, + msgHandler: msgHandler, subFilter: subFilter, } @@ -171,7 +181,7 @@ func (ctrl *topicsCtrl) Broadcast(name string, data []byte, timeout time.Duratio func (ctrl *topicsCtrl) Unsubscribe(logger *zap.Logger, name string, hard bool) error { ctrl.container.Unsubscribe(name) - if ctrl.msgValidatorFactory != nil { + if ctrl.msgValidator != nil { err := ctrl.ps.UnregisterTopicValidator(name) if err != nil { logger.Debug("could not unregister msg validator", zap.String("topic", name), zap.Error(err)) @@ -207,7 +217,9 @@ func (ctrl *topicsCtrl) start(logger *zap.Logger, name string, sub *pubsub.Subsc func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) error { ctx, cancel := context.WithCancel(ctrl.ctx) defer cancel() + topicName := sub.Topic() + logger = logger.With(zap.String("topic", topicName)) logger.Debug("start listening to topic") for ctx.Err() == nil { @@ -235,7 +247,7 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err ).Inc() } - if err := ctrl.msgHandler(topicName, msg); err != nil { + if err := ctrl.msgHandler(ctx, topicName, msg); err != nil { logger.Debug("could not handle msg", zap.Error(err)) } } @@ -244,7 +256,7 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err // setupTopicValidator registers the topic validator func (ctrl *topicsCtrl) setupTopicValidator(name string) error { - if ctrl.msgValidatorFactory != nil { + if ctrl.msgValidator != nil { // first try to unregister in case there is already a msg validator for that topic (e.g. fork scenario) _ = ctrl.ps.UnregisterTopicValidator(name) @@ -252,7 +264,7 @@ func (ctrl *topicsCtrl) setupTopicValidator(name string) error { // Optional: set a timeout for message validation // opts = append(opts, pubsub.WithValidatorTimeout(time.Second)) - err := ctrl.ps.RegisterTopicValidator(name, ctrl.msgValidatorFactory(name), opts...) + err := ctrl.ps.RegisterTopicValidator(name, ctrl.msgValidator.ValidatorForTopic(name), opts...) 
if err != nil { return errors.Wrap(err, "could not register topic validator") } diff --git a/network/topics/controller_test.go b/network/topics/controller_test.go index bc1e028cc4..4a09584cfb 100644 --- a/network/topics/controller_test.go +++ b/network/topics/controller_test.go @@ -2,61 +2,94 @@ package topics import ( "context" + "encoding/base64" "encoding/hex" - "fmt" + "encoding/json" + "math" "sync" "sync/atomic" "testing" "time" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/network/commons" - - "github.com/bloxapp/ssv/protocol/v2/types" - "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" + "github.com/bloxapp/ssv/networkconfig" ) func TestTopicManager(t *testing.T) { logger := logging.TestLogger(t) - nPeers := 4 - - pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", - "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", - "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", - "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", - "a1169bd8407279d9e56b8cefafa37449afd6751f94d1da6bc8145b96d7ad2940184d506971291cd55ae152f9fc65b146", - "80ff2cfb8fd80ceafbb3c331f271a9f9ce0ed3e360087e314d0a8775e86fa7cd19c999b821372ab6419cde376e032ff6", - "a01909aac48337bab37c0dba395fb7495b600a53c58059a251d00b4160b9da74c62f9c4e9671125c59932e7bb864fd3d", - "a4fc8c859ed5c10d7a1ff9fb111b76df3f2e0a6cbe7d0c58d3c98973c0ff160978bc9754a964b24929fff486ebccb629"} - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - peers := newPeers(ctx, logger, t, nPeers, false, true) - baseTest(t, ctx, logger, peers, pks, 1, 2) + + t.Run("happy flow", func(t *testing.T) { + nPeers := 4 + + pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", + "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", + "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", + "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", + "a1169bd8407279d9e56b8cefafa37449afd6751f94d1da6bc8145b96d7ad2940184d506971291cd55ae152f9fc65b146", + "80ff2cfb8fd80ceafbb3c331f271a9f9ce0ed3e360087e314d0a8775e86fa7cd19c999b821372ab6419cde376e032ff6", + "a01909aac48337bab37c0dba395fb7495b600a53c58059a251d00b4160b9da74c62f9c4e9671125c59932e7bb864fd3d", + "a4fc8c859ed5c10d7a1ff9fb111b76df3f2e0a6cbe7d0c58d3c98973c0ff160978bc9754a964b24929fff486ebccb629"} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validator := validation.NewMessageValidator(networkconfig.TestNetwork) + + peers := newPeers(ctx, logger, t, nPeers, validator, true, nil) + baseTest(t, ctx, logger, peers, pks, 1, 2) + }) + + t.Run("banning peer", func(t *testing.T) { + t.Skip() // TODO: finish the test + + pks := []string{ + 
"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", + "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", + "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", + "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validator := validation.NewMessageValidator(networkconfig.TestNetwork) + + scoreMap := map[peer.ID]*pubsub.PeerScoreSnapshot{} + var scoreMapMu sync.Mutex + + scoreInspector := func(m map[peer.ID]*pubsub.PeerScoreSnapshot) { + b, _ := json.Marshal(m) + t.Logf("peer scores: %v", string(b)) + + scoreMapMu.Lock() + defer scoreMapMu.Unlock() + + scoreMap = m + } + + const nPeers = 4 + peers := newPeers(ctx, logger, t, nPeers, validator, true, scoreInspector) + banningTest(t, ctx, logger, peers, pks, scoreMap, &scoreMapMu) + }) } func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, pks []string, minMsgCount, maxMsgCount int) { nValidators := len(pks) // nPeers := len(peers) - validatorTopic := func(pkhex string) string { - pk, err := hex.DecodeString(pkhex) - if err != nil { - return "invalid" - } - return commons.ValidatorTopicID(pk)[0] - } - t.Log("subscribing to topics") // listen to topics for _, pk := range pks { @@ -85,7 +118,7 @@ func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, wg.Add(1) go func(p *P, pk string, pi int) { defer wg.Done() - msg, err := dummyMsg(pk, pi%4) + msg, err := dummyMsg(pk, pi%4, false) require.NoError(t, err) raw, err := msg.Encode() require.NoError(t, err) @@ -146,6 +179,109 @@ func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, wg.Wait() } +func banningTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, pks []string, scoreMap map[peer.ID]*pubsub.PeerScoreSnapshot, scoreMapMu *sync.Mutex) { + t.Log("subscribing to topics") + + for _, pk := range pks { + for _, p := range peers { + require.NoError(t, p.tm.Subscribe(logger, validatorTopic(pk))) + } + } + + // wait for the peers to join topics + <-time.After(3 * time.Second) + + t.Log("checking initial scores") + for _, pk := range pks { + for _, p := range peers { + peerList, err := p.tm.Peers(pk) + require.NoError(t, err) + + for _, pid := range peerList { + scoreMapMu.Lock() + v, ok := scoreMap[pid] + scoreMapMu.Unlock() + + require.True(t, ok) + require.Equal(t, 0, v.Score) + } + } + } + + t.Log("broadcasting messages") + + const invalidMessagesCount = 10 + + // TODO: get current default score, send an invalid rejected message, check the score; then run 10 of them and check the score; then check valid message + + invalidMessages, err := msgSequence(pks[0], invalidMessagesCount, len(pks), true) + require.NoError(t, err) + + var wg sync.WaitGroup + // publish some messages + for i, msg := range invalidMessages { + wg.Add(1) + go func(p *P, pk string, msg *spectypes.SSVMessage) { + defer wg.Done() + + raw, err := msg.Encode() + require.NoError(t, err) + + require.NoError(t, p.tm.Broadcast(validatorTopic(pk), raw, time.Second*10)) + + <-time.After(time.Second * 5) + }(peers[0], pks[i%len(pks)], msg) + } + wg.Wait() + + <-time.After(5 * time.Second) + + t.Log("checking final scores") + for _, pk := range pks { + for _, p := range peers { + peerList, err := p.tm.Peers(pk) + require.NoError(t, err) + + for _, pid := range peerList 
{ + scoreMapMu.Lock() + v, ok := scoreMap[pid] + scoreMapMu.Unlock() + + require.True(t, ok) + require.Equal(t, 0, v.Score) // TODO: score should change + } + } + } + + //t.Log("unsubscribing") + //// unsubscribing multiple times for each topic + //wg.Add(1) + //go func(p *P, pk string) { + // defer wg.Done() + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // go func(p *P) { + // <-time.After(time.Millisecond) + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // }(p) + // wg.Add(1) + // go func(p *P) { + // defer wg.Done() + // <-time.After(time.Millisecond * 50) + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // }(p) + //}(peer, pk) + // + //wg.Wait() +} + +func validatorTopic(pkhex string) string { + pk, err := hex.DecodeString(pkhex) + if err != nil { + return "invalid" + } + return commons.ValidatorTopicID(pk)[0] +} + type P struct { host host.Host ps *pubsub.PubSub @@ -181,10 +317,10 @@ func (p *P) saveMsg(t string, msg *pubsub.Message) { } // TODO: use p2p/testing -func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgValidator, msgID bool) []*P { +func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgValidator validation.MessageValidator, msgID bool, scoreInspector pubsub.ExtendedPeerScoreInspectFn) []*P { peers := make([]*P, n) for i := 0; i < n; i++ { - peers[i] = newPeer(ctx, logger, t, msgValidator, msgID) + peers[i] = newPeer(ctx, logger, t, msgValidator, msgID, scoreInspector) } t.Logf("%d peers were created", n) th := uint64(n/2) + uint64(n/4) @@ -203,7 +339,7 @@ func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgV return peers } -func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator, msgID bool) *P { +func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator validation.MessageValidator, msgID bool, scoreInspector pubsub.ExtendedPeerScoreInspectFn) *P { h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0")) require.NoError(t, err) ds, err := discovery.NewLocalDiscovery(ctx, logger, h) @@ -215,11 +351,11 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator midHandler = NewMsgIDHandler(ctx, 2*time.Minute) go midHandler.Start() } - cfg := &PububConfig{ + cfg := &PubSubConfig{ Host: h, TraceLog: false, MsgIDHandler: midHandler, - MsgHandler: func(topic string, msg *pubsub.Message) error { + MsgHandler: func(_ context.Context, topic string, msg *pubsub.Message) error { p.saveMsg(topic, msg) return nil }, @@ -228,15 +364,13 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator IPColocationWeight: 0, OneEpochDuration: time.Minute, }, + MsgValidator: msgValidator, + ScoreInspector: scoreInspector, + ScoreInspectorInterval: 100 * time.Millisecond, // TODO: add mock for peers.ScoreIndex } - // - if msgValidator { - cfg.MsgValidatorFactory = func(s string) MsgValidatorFunc { - return NewSSVMsgValidator() - } - } - ps, tm, err := NewPubsub(ctx, logger, cfg) + + ps, tm, err := NewPubSub(ctx, logger, cfg) require.NoError(t, err) p = &P{ @@ -258,28 +392,63 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator return p } -func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { +func msgSequence(pkHex string, n, committeeSize int, malformed bool) ([]*spectypes.SSVMessage, error) { + var messages []*spectypes.SSVMessage + + for i := 0; i < n; i++ { + height 
:= i * committeeSize + msg, err := dummyMsg(pkHex, height, malformed) + if err != nil { + return nil, err + } + + messages = append(messages, msg) + } + + return messages, nil +} + +func dummyMsg(pkHex string, height int, malformed bool) (*spectypes.SSVMessage, error) { pk, err := hex.DecodeString(pkHex) if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) - msgData := fmt.Sprintf(`{ - "message": { - "type": 3, - "round": 2, - "identifier": "%s", - "height": %d, - "value": "bk0iAAAAAAACAAAAAAAAAAbYXFSt2H7SQd5q5u+N0bp6PbbPTQjU25H1QnkbzTECahIBAAAAAADmi+NJfvXZ3iXp2cfs0vYVW+EgGD7DTTvr5EkLtiWq8WsSAQAAAAAAIC8dZTEdD3EvE38B9kDVWkSLy40j0T+TtSrrrBqVjo4=" - }, - "signature": "sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN", - "signer_ids": [1,3,4] - }`, id, height) - return &spectypes.SSVMessage{ + + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + signature, err := base64.StdEncoding.DecodeString("sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN") + if err != nil { + return nil, err + } + + signedMessage := specqbft.SignedMessage{ + Signature: signature, + Signers: []spectypes.OperatorID{1, 3, 4}, + Message: specqbft.Message{ + MsgType: specqbft.RoundChangeMsgType, + Height: specqbft.Height(height), + Round: 2, + Identifier: id[:], + Root: [32]byte{}, + }, + FullData: nil, + } + + msgData, err := signedMessage.Encode() + if err != nil { + return nil, err + } + + ssvMsg := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, - Data: []byte(msgData), - }, nil + Data: msgData, + } + + if malformed { + ssvMsg.MsgType = math.MaxUint64 + } + + return ssvMsg, nil } // diff --git a/network/topics/metrics.go b/network/topics/metrics.go index 53c651967e..7df570090a 100644 --- a/network/topics/metrics.go +++ b/network/topics/metrics.go @@ -6,15 +6,12 @@ import ( "go.uber.org/zap" ) +// TODO: replace with new metrics var ( metricPubsubTrace = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:network:pubsub:trace", Help: "Traces of pubsub messages", }, []string{"type"}) - metricPubsubMsgValidationResults = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "ssv:network:pubsub:msg:validation", - Help: "Traces of pubsub message validation results", - }, []string{"type"}) metricPubsubOutbound = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:p2p:pubsub:msg:out", Help: "Count broadcasted messages", @@ -23,10 +20,6 @@ var ( Name: "ssv:p2p:pubsub:msg:in", Help: "Count incoming messages", }, []string{"topic", "msg_type"}) - metricPubsubActiveMsgValidation = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ssv:p2p:pubsub:msg:val:active", - Help: "Count active message validation", - }, []string{"topic"}) metricPubsubPeerScoreInspect = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "ssv:p2p:pubsub:score:inspect", Help: "Gauge for negative peer scores", @@ -38,30 +31,13 @@ func init() { if err := prometheus.Register(metricPubsubTrace); err != nil { logger.Debug("could not register prometheus collector") } - if err := prometheus.Register(metricPubsubMsgValidationResults); err != nil { - logger.Debug("could not register prometheus collector") - } if err := prometheus.Register(metricPubsubOutbound); err != nil { logger.Debug("could not register prometheus collector") } if err := 
prometheus.Register(metricPubsubInbound); err != nil { logger.Debug("could not register prometheus collector") } - if err := prometheus.Register(metricPubsubActiveMsgValidation); err != nil { - logger.Debug("could not register prometheus collector") - } if err := prometheus.Register(metricPubsubPeerScoreInspect); err != nil { logger.Debug("could not register prometheus collector") } } - -type msgValidationResult string - -var ( - validationResultNoData msgValidationResult = "no_data" - validationResultEncoding msgValidationResult = "encoding" -) - -func reportValidationResult(result msgValidationResult) { - metricPubsubMsgValidationResults.WithLabelValues(string(result)).Inc() -} diff --git a/network/topics/msg_validator.go b/network/topics/msg_validator.go deleted file mode 100644 index f1329fa698..0000000000 --- a/network/topics/msg_validator.go +++ /dev/null @@ -1,67 +0,0 @@ -package topics - -import ( - "context" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/bloxapp/ssv/network/commons" -) - -// MsgValidatorFunc represents a message validator -type MsgValidatorFunc = func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult - -// NewSSVMsgValidator creates a new msg validator that validates message structure, -// and checks that the message was sent on the right topic. -// TODO: enable post SSZ change, remove logs, break into smaller validators? -func NewSSVMsgValidator() func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - return func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { - topic := pmsg.GetTopic() - metricPubsubActiveMsgValidation.WithLabelValues(topic).Inc() - defer metricPubsubActiveMsgValidation.WithLabelValues(topic).Dec() - if len(pmsg.GetData()) == 0 { - reportValidationResult(validationResultNoData) - return pubsub.ValidationReject - } - msg, err := commons.DecodeNetworkMsg(pmsg.GetData()) - if err != nil { - // can't decode message - // logger.Debug("invalid: can't decode message", zap.Error(err)) - reportValidationResult(validationResultEncoding) - return pubsub.ValidationReject - } - if msg == nil { - reportValidationResult(validationResultEncoding) - return pubsub.ValidationReject - } - pmsg.ValidatorData = *msg - return pubsub.ValidationAccept - - // Check if the message was sent on the right topic. 
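// Editor's note: this patch swaps the per-topic factory above for
// validation.MessageValidator.ValidatorForTopic, registered in controller.go
// through ps.RegisterTopicValidator. The pubsub-facing shape is unchanged: a
// function taking (context.Context, peer.ID, *pubsub.Message) and returning a
// pubsub.ValidationResult. A hedged minimal example of that shape, not the new
// validator's actual logic:
//
//	func validate(ctx context.Context, p peer.ID, m *pubsub.Message) pubsub.ValidationResult {
//		if len(m.GetData()) == 0 {
//			return pubsub.ValidationReject // mirrors the deleted no-data check
//		}
//		return pubsub.ValidationAccept
//	}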
- // currentTopic := pmsg.GetTopic() - // currentTopicBaseName := fork.GetTopicBaseName(currentTopic) - // topics := fork.ValidatorTopicID(msg.GetID().GetPubKey()) - // for _, tp := range topics { - // if tp == currentTopicBaseName { - // reportValidationResult(validationResultValid) - // return pubsub.ValidationAccept - // } - //} - // reportValidationResult(validationResultTopic) - // return pubsub.ValidationReject - } -} - -//// CombineMsgValidators executes multiple validators -// func CombineMsgValidators(validators ...MsgValidatorFunc) MsgValidatorFunc { -// return func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { -// res := pubsub.ValidationAccept -// for _, v := range validators { -// if res = v(ctx, p, msg); res == pubsub.ValidationReject { -// break -// } -// } -// return res -// } -//} diff --git a/network/topics/msg_validator_test.go b/network/topics/msg_validator_test.go index 3a4f6b2081..dd66fb8312 100644 --- a/network/topics/msg_validator_test.go +++ b/network/topics/msg_validator_test.go @@ -2,44 +2,69 @@ package topics import ( "context" - "encoding/hex" - "fmt" "testing" + v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/herumi/bls-eth-go-binary/bls" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" pubsub "github.com/libp2p/go-libp2p-pubsub" ps_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network/commons" - "github.com/bloxapp/ssv/protocol/v2/types" - "github.com/bloxapp/ssv/utils/threshold" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/storage" + beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" ) func TestMsgValidator(t *testing.T) { - pks := createSharePublicKeys(4) - mv := NewSSVMsgValidator() + logger := zaptest.NewLogger(t) + db, err := kv.NewInMemory(logger, basedb.Options{}) + require.NoError(t, err) + + ns, err := storage.NewNodeStorage(logger, db) + require.NoError(t, err) + + ks := spectestingutils.Testing4SharesSet() + share := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: v1.ValidatorStateActiveOngoing, + }, + Liquidated: false, + }, + } + require.NoError(t, ns.Shares().Save(nil, share)) + + mv := validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithShareStorage(ns.Shares())) require.NotNil(t, mv) + slot := networkconfig.TestNetwork.Beacon.GetBeaconNetwork().EstimatedCurrentSlot() + t.Run("valid consensus msg", func(t *testing.T) { - pkHex := pks[0] - msg, err := dummySSVConsensusMsg(pkHex, 15160) + msg, err := dummySSVConsensusMsg(share.ValidatorPubKey, qbft.Height(slot)) require.NoError(t, err) + raw, err := msg.Encode() require.NoError(t, err) - pk, err := hex.DecodeString(pkHex) - require.NoError(t, err) - topics := commons.ValidatorTopicID(pk) + + topics := commons.ValidatorTopicID(share.ValidatorPubKey) pmsg := newPBMsg(raw, commons.GetTopicFullName(topics[0]), []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r")) - res := mv(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) - require.Equal(t, res, 
pubsub.ValidationAccept) + res := mv.ValidatePubsubMessage(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) + require.Equal(t, pubsub.ValidationAccept, res) }) // TODO: enable once topic validation is in place - // t.Run("wrong topic", func(t *testing.T) { + //t.Run("wrong topic", func(t *testing.T) { // pkHex := "b5de683dbcb3febe8320cc741948b9282d59b75a6970ed55d6f389da59f26325331b7ea0e71a2552373d0debb6048b8a" - // msg, err := dummySSVConsensusMsg(pkHex, 15160) + // msg, err := dummySSVConsensusMsg(share.ValidatorPubKey, 15160) // require.NoError(t, err) // raw, err := msg.Encode() // require.NoError(t, err) @@ -47,40 +72,26 @@ func TestMsgValidator(t *testing.T) { // require.NoError(t, err) // topics := commons.ValidatorTopicID(pk) // pmsg := newPBMsg(raw, topics[0], []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r")) - // res := mv(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) + // res := mv.ValidateP2PMessage(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) // require.Equal(t, res, pubsub.ValidationReject) - // }) + //}) t.Run("empty message", func(t *testing.T) { pmsg := newPBMsg([]byte{}, "xxx", []byte{}) - res := mv(context.Background(), "xxxx", pmsg) - require.Equal(t, res, pubsub.ValidationReject) + res := mv.ValidatePubsubMessage(context.Background(), "xxxx", pmsg) + require.Equal(t, pubsub.ValidationReject, res) }) // TODO: enable once topic validation is in place - // t.Run("invalid validator public key", func(t *testing.T) { + //t.Run("invalid validator public key", func(t *testing.T) { // msg, err := dummySSVConsensusMsg("10101011", 1) // require.NoError(t, err) // raw, err := msg.Encode() // require.NoError(t, err) // pmsg := newPBMsg(raw, "xxx", []byte{}) - // res := mv(context.Background(), "xxxx", pmsg) + // res := mv.ValidateP2PMessage(context.Background(), "xxxx", pmsg) // require.Equal(t, res, pubsub.ValidationReject) - // }) - -} - -func createSharePublicKeys(n int) []string { - threshold.Init() - - var res []string - for i := 0; i < n; i++ { - sk := bls.SecretKey{} - sk.SetByCSPRNG() - pk := sk.GetPublicKey().SerializeToHexStr() - res = append(res, pk) - } - return res + //}) } func newPBMsg(data []byte, topic string, from []byte) *pubsub.Message { @@ -93,26 +104,19 @@ func newPBMsg(data []byte, topic string, from []byte) *pubsub.Message { return pmsg } -func dummySSVConsensusMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { - pk, err := hex.DecodeString(pkHex) +func dummySSVConsensusMsg(pk spectypes.ValidatorPK, height qbft.Height) (*spectypes.SSVMessage, error) { + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + ks := spectestingutils.Testing4SharesSet() + validSignedMessage := spectestingutils.TestingRoundChangeMessageWithHeightAndIdentifier(ks.Shares[1], 1, height, id[:]) + + encodedSignedMessage, err := validSignedMessage.Encode() if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) - msgData := fmt.Sprintf(`{ - "message": { - "type": 3, - "round": 2, - "identifier": "%s", - "height": %d, - "value": "bk0iAAAAAAACAAAAAAAAAAbYXFSt2H7SQd5q5u+N0bp6PbbPTQjU25H1QnkbzTECahIBAAAAAADmi+NJfvXZ3iXp2cfs0vYVW+EgGD7DTTvr5EkLtiWq8WsSAQAAAAAAIC8dZTEdD3EvE38B9kDVWkSLy40j0T+TtSrrrBqVjo4=" - }, - "signature": "sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN", - 
"signer_ids": [1,3,4] - }`, id, height) + return &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, - Data: []byte(msgData), + Data: encodedSignedMessage, }, nil } diff --git a/network/topics/params/gossipsub.go b/network/topics/params/gossipsub.go index 5e7945768d..c7d51ba8a1 100644 --- a/network/topics/params/gossipsub.go +++ b/network/topics/params/gossipsub.go @@ -6,7 +6,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" ) -var ( +const ( // gsD topic stable mesh target count gsD = 8 // gsDlo topic stable mesh low watermark diff --git a/network/topics/params/topic_score.go b/network/topics/params/topic_score.go index b7b19fc8ef..a7b0942f34 100644 --- a/network/topics/params/topic_score.go +++ b/network/topics/params/topic_score.go @@ -22,7 +22,8 @@ const ( // using value of 50 (prysm changed to 90) dampeningFactor = 50 - subnetTopicsWeight = 4.0 + subnetTopicsWeight = 4.0 + invalidMeshDeliveriesWeight = -800 ) const ( @@ -167,7 +168,7 @@ func TopicParams(opts Options) (*pubsub.TopicScoreParams, error) { } if opts.Topic.InvalidMsgDecayTime > 0 { - params.InvalidMessageDeliveriesWeight = -opts.maxScore() / opts.Topic.TopicWeight + params.InvalidMessageDeliveriesWeight = invalidMeshDeliveriesWeight params.InvalidMessageDeliveriesDecay = scoreDecay(opts.Topic.InvalidMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) } else { params.InvalidMessageDeliveriesDecay = 0.1 diff --git a/network/topics/pubsub.go b/network/topics/pubsub.go index b4b67b4833..2422422e2b 100644 --- a/network/topics/pubsub.go +++ b/network/topics/pubsub.go @@ -26,7 +26,7 @@ const ( ) // the following are kept in vars to allow flexibility (e.g. in tests) -var ( +const ( // validationQueueSize is the size that we assign to the validation queue validationQueueSize = 512 // outboundQueueSize is the size that we assign to the outbound message queue @@ -34,32 +34,34 @@ var ( // validateThrottle is the amount of goroutines used for pubsub msg validation validateThrottle = 8192 // scoreInspectInterval is the interval for performing score inspect, which goes over all peers scores - scoreInspectInterval = time.Minute + defaultScoreInspectInterval = time.Minute // msgIDCacheTTL specifies how long a message ID will be remembered as seen, 6.4m (as ETH 2.0) msgIDCacheTTL = params.HeartbeatInterval * 550 ) -// PububConfig is the needed config to instantiate pubsub -type PububConfig struct { +// PubSubConfig is the needed config to instantiate pubsub +type PubSubConfig struct { Host host.Host TraceLog bool StaticPeers []peer.AddrInfo MsgHandler PubsubMessageHandler - // MsgValidatorFactory accepts the topic name and returns the corresponding msg validator + // MsgValidator accepts the topic name and returns the corresponding msg validator // in case we need different validators for specific topics, // this should be the place to map a validator to topic - MsgValidatorFactory func(string) MsgValidatorFunc - ScoreIndex peers.ScoreIndex - Scoring *ScoringConfig - MsgIDHandler MsgIDHandler - Discovery discovery.Discovery + MsgValidator messageValidator + ScoreIndex peers.ScoreIndex + Scoring *ScoringConfig + MsgIDHandler MsgIDHandler + Discovery discovery.Discovery ValidateThrottle int ValidationQueueSize int OutboundQueueSize int MsgIDCacheTTL time.Duration - GetValidatorStats network.GetValidatorStats + GetValidatorStats network.GetValidatorStats + ScoreInspector pubsub.ExtendedPeerScoreInspectFn + ScoreInspectorInterval time.Duration } // ScoringConfig is the configuration for peer 
scoring @@ -76,7 +78,7 @@ type PubsubBundle struct { Resolver MsgPeersResolver } -func (cfg *PububConfig) init() error { +func (cfg *PubSubConfig) init() error { if cfg.Host == nil { return errors.New("bad args: missing host") } @@ -96,14 +98,14 @@ func (cfg *PububConfig) init() error { } // initScoring initializes scoring config -func (cfg *PububConfig) initScoring() { +func (cfg *PubSubConfig) initScoring() { if cfg.Scoring == nil { cfg.Scoring = DefaultScoringConfig() } } -// NewPubsub creates a new pubsub router and the necessary components -func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubsub.PubSub, Controller, error) { +// NewPubSub creates a new pubsub router and the necessary components +func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig) (*pubsub.PubSub, Controller, error) { if err := cfg.init(); err != nil { return nil, nil, err } @@ -133,12 +135,23 @@ func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubs } var topicScoreFactory func(string) *pubsub.TopicScoreParams - if cfg.ScoreIndex != nil { + + inspector := cfg.ScoreInspector + inspectInterval := cfg.ScoreInspectorInterval + if cfg.ScoreIndex != nil || inspector != nil { cfg.initScoring() - inspector := scoreInspector(logger, cfg.ScoreIndex) + + if inspector == nil { + inspector = scoreInspector(logger, cfg.ScoreIndex) + } + + if inspectInterval == 0 { + inspectInterval = defaultScoreInspectInterval + } + peerScoreParams := params.PeerScoreParams(cfg.Scoring.OneEpochDuration, cfg.MsgIDCacheTTL, cfg.Scoring.IPColocationWeight, 0, cfg.Scoring.IPWhilelist...) psOpts = append(psOpts, pubsub.WithPeerScore(peerScoreParams, params.PeerScoreThresholds()), - pubsub.WithPeerScoreInspect(inspector, scoreInspectInterval)) + pubsub.WithPeerScoreInspect(inspector, inspectInterval)) async.Interval(ctx, time.Hour, func() { // reset peer scores metric every hour because it has a label for peer ID which can grow infinitely metricPubsubPeerScoreInspect.Reset() @@ -169,7 +182,7 @@ func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubs return nil, nil, err } - ctrl := NewTopicsController(ctx, logger, cfg.MsgHandler, cfg.MsgValidatorFactory, sf, ps, topicScoreFactory) + ctrl := NewTopicsController(ctx, logger, cfg.MsgHandler, cfg.MsgValidator, sf, ps, topicScoreFactory) return ps, ctrl, nil } diff --git a/network/topics/scoring.go b/network/topics/scoring.go index ee0360364a..9e47514262 100644 --- a/network/topics/scoring.go +++ b/network/topics/scoring.go @@ -54,7 +54,7 @@ func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex) pubsub.Extend } // topicScoreParams factory for creating scoring params for topics -func topicScoreParams(logger *zap.Logger, cfg *PububConfig) func(string) *pubsub.TopicScoreParams { +func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig) func(string) *pubsub.TopicScoreParams { return func(t string) *pubsub.TopicScoreParams { totalValidators, activeValidators, myValidators, err := cfg.GetValidatorStats() if err != nil { diff --git a/networkconfig/config.go b/networkconfig/config.go index de65d48fe4..a4791e878e 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -14,6 +14,7 @@ import ( var SupportedConfigs = map[string]NetworkConfig{ Mainnet.Name: Mainnet, + HoleskyStage.Name: HoleskyStage, JatoV2Stage.Name: JatoV2Stage, JatoV2.Name: JatoV2, LocalTestnet.Name: LocalTestnet, @@ -61,3 +62,8 @@ func (n NetworkConfig) SlotDurationSec() time.Duration { func (n NetworkConfig) 
SlotsPerEpoch() uint64 { return n.Beacon.SlotsPerEpoch() } + +// GetGenesisTime returns the network's genesis time, derived from the beacon chain's MinGenesisTime unix timestamp. +func (n NetworkConfig) GetGenesisTime() time.Time { + return time.Unix(int64(n.Beacon.MinGenesisTime()), 0) +} diff --git a/networkconfig/holesky-stage.go b/networkconfig/holesky-stage.go new file mode 100644 index 0000000000..c3e9d1aa8a --- /dev/null +++ b/networkconfig/holesky-stage.go @@ -0,0 +1,22 @@ +package networkconfig + +import ( + "math/big" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" +) + +var HoleskyStage = NetworkConfig{ + Name: "holesky-stage", + Beacon: beacon.NewNetwork(spectypes.HoleskyNetwork), + Domain: [4]byte{0x00, 0x00, 0x31, 0x12}, + GenesisEpoch: 1, + RegistrySyncOffset: new(big.Int).SetInt64(84599), + RegistryContractAddr: "0x0d33801785340072C452b994496B19f196b7eE15", + Bootnodes: []string{ + "enr:-Li4QNUN0RdeoHjI4Np18-PX1VXrJ2rJMo2OarRz0wCAxiYlD3s_E4zsmXi1LHv62ULLBT-AQfZIjYefEoEsMDkaEKCGAYtCguORh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhKfrtCyJc2VjcDI1NmsxoQP2e508AoA0B-KH-IaAd3nVCfI9q16lNztV-oTpcH72tIN0Y3CCE4mDdWRwgg-h", + }, + WhitelistedOperatorKeys: []string{}, +} diff --git a/operator/duties/attester.go b/operator/duties/attester.go index 6af6f4abd1..f89cbaf867 100644 --- a/operator/duties/attester.go +++ b/operator/duties/attester.go @@ -11,19 +11,20 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) type AttesterHandler struct { baseHandler - duties *Duties[*eth2apiv1.AttesterDuty] + duties *dutystore.Duties[eth2apiv1.AttesterDuty] fetchCurrentEpoch bool fetchNextEpoch bool } -func NewAttesterHandler() *AttesterHandler { +func NewAttesterHandler(duties *dutystore.Duties[eth2apiv1.AttesterDuty]) *AttesterHandler { h := &AttesterHandler{ - duties: NewDuties[*eth2apiv1.AttesterDuty](), + duties: duties, } h.fetchCurrentEpoch = true h.fetchFirst = true @@ -52,7 +53,7 @@ func (h *AttesterHandler) Name() string { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current epoch. +// 2. Reset the stored duties for the current epoch. // 3. Fetch duties for the current epoch. // 4. If necessary, fetch duties for the next epoch. 
// @@ -69,7 +70,8 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_seq", buildStr)) @@ -82,7 +84,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { } else { h.processExecution(currentEpoch, slot) if h.indicesChanged { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.indicesChanged = false } h.processFetching(ctx, currentEpoch, slot) @@ -98,7 +100,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // last slot of epoch if uint64(slot)%slotsPerEpoch == slotsPerEpoch-1 { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) } case reorgEvent := <-h.reorg: @@ -108,18 +110,18 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // reset current epoch duties if reorgEvent.Previous { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.fetchFirst = true h.fetchCurrentEpoch = true if h.shouldFetchNexEpoch(reorgEvent.Slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } else if reorgEvent.Current { // reset & re-fetch next epoch duties if in appropriate slot range, // otherwise they will be fetched by the appropriate slot tick. if h.shouldFetchNexEpoch(reorgEvent.Slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } @@ -135,7 +137,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // reset next epoch duties if in appropriate slot range if h.shouldFetchNexEpoch(slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } @@ -164,24 +166,26 @@ func (h *AttesterHandler) processFetching(ctx context.Context, epoch phase0.Epoc } func (h *AttesterHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) { + duties := h.duties.CommitteeSlotDuties(epoch, slot) + if duties == nil { + return + } + // range over duties and execute - if slotMap, ok := h.duties.m[epoch]; ok { - if duties, ok := slotMap[slot]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)*2) - for _, d := range duties { - if h.shouldExecute(d) { - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester)) - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator)) - } - } - h.executeDuties(h.logger, toExecute) + toExecute := make([]*spectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d) { + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester)) + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator)) } } + + h.executeDuties(h.logger, toExecute) } func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error { start := time.Now() - indices := h.validatorController.ActiveValidatorIndices(epoch) + indices := h.validatorController.CommitteeActiveIndices(epoch) if len(indices) == 0 { return nil @@ -194,7 +198,7 @@ func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase specDuties := make([]*spectypes.Duty, 0, len(duties)) for _, d := range duties { - h.duties.Add(epoch, d.Slot, d) + h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, true) specDuties = append(specDuties, h.toSpecDuty(d, 
spectypes.BNRoleAttester)) } @@ -245,8 +249,7 @@ func (h *AttesterHandler) shouldExecute(duty *eth2apiv1.AttesterDuty) bool { return true } if currentSlot+1 == duty.Slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/attester_test.go b/operator/duties/attester_test.go index e0927c1f0a..4292ddf395 100644 --- a/operator/duties/attester_test.go +++ b/operator/duties/attester_test.go @@ -5,50 +5,52 @@ import ( "testing" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" ) -func setupAttesterDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*v1.AttesterDuty]) (chan struct{}, chan []*spectypes.Duty) { +func setupAttesterDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.AttesterDuty]) (chan struct{}, chan []*spectypes.Duty) { fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.Duty) s.beaconNode.(*mocks.MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.AttesterDuty, error) { + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.AttesterDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getIndices := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - duties, _ := dutiesMap.Get(epoch) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return fetchDutiesCall, executeDutiesCall } -func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*v1.AttesterDuty) []*spectypes.Duty { +func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*eth2apiv1.AttesterDuty) []*spectypes.Duty { expectedDuties := make([]*spectypes.Duty, 0) for _, d := range duties { 
expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleAttester)) @@ -59,15 +61,15 @@ func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*v1.Attes func TestScheduler_Attester_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(1)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(1), @@ -95,15 +97,15 @@ func TestScheduler_Attester_Same_Slot(t *testing.T) { func TestScheduler_Attester_Diff_Slots(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -136,9 +138,9 @@ func TestScheduler_Attester_Diff_Slots(t *testing.T) { func TestScheduler_Attester_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -152,7 +154,7 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { scheduler.indicesChg <- struct{}{} // no execution should happen in slot 0 waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(0), @@ -180,7 +182,7 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { // STEP 4: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(2)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[2]}) + expected := expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -193,9 +195,9 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + 
dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -213,7 +215,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 3: trigger a change in active indices scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(3), ValidatorIndex: phase0.ValidatorIndex(1), @@ -223,7 +225,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 4: trigger a change in active indices in the same slot scheduler.indicesChg <- struct{}{} duties, _ = dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(4), ValidatorIndex: phase0.ValidatorIndex(2), @@ -238,7 +240,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 6: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[0]}) + expected := expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -247,7 +249,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 7: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(4)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[1]}) + expected = expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -261,15 +263,15 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // reorg previous dependent root changed func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(63)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(66), @@ -282,8 +284,8 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, PreviousDutyDependentRoot: phase0.Root{0x01}, @@ -298,13 +300,13 @@ func 
TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg on epoch transition - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(67), @@ -341,15 +343,15 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { // reorg previous dependent root changed and the indices changed as well func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(63)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(66), @@ -363,8 +365,8 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, PreviousDutyDependentRoot: phase0.Root{0x01}, @@ -379,13 +381,13 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg on epoch transition - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(67), @@ -398,7 +400,7 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t // STEP 5: trigger indices change scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(2)) - dutiesMap.Set(phase0.Epoch(2), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(67), ValidatorIndex: phase0.ValidatorIndex(2), @@ -432,15 +434,15 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t // reorg previous dependent root changed func TestScheduler_Attester_Reorg_Previous(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(32)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, 
executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(35), @@ -453,8 +455,8 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x01}, }, @@ -468,13 +470,13 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -511,15 +513,15 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { // reorg previous dependent root changed and the indices changed the same slot func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(32)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(35), @@ -532,8 +534,8 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x01}, }, @@ -547,13 +549,13 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -566,7 +568,7 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T // STEP 5: trigger indices change scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(1)) - dutiesMap.Set(phase0.Epoch(1), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(36), ValidatorIndex: phase0.ValidatorIndex(2), @@ -600,15 +602,15 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T // reorg current dependent root changed func 
TestScheduler_Attester_Reorg_Current(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(47)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(64), @@ -621,8 +623,8 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -636,13 +638,13 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(65), @@ -687,15 +689,15 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { // reorg current dependent root changed including indices change in the same slot func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(47)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(64), @@ -708,8 +710,8 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -723,13 +725,13 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(65), @@ -742,7 +744,7 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 5: trigger indices change 
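// NOTE: a send on indicesChg mimics the validator controller reporting a change in the active validator set; the scheduler is expected to pick it up and re-fetch duties on a subsequent ticker event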
scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(2)) - dutiesMap.Set(phase0.Epoch(2), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(65), ValidatorIndex: phase0.ValidatorIndex(2), @@ -783,15 +785,15 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { func TestScheduler_Attester_Early_Block(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -817,8 +819,8 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) // STEP 4: trigger head event (block arrival) - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), }, } @@ -833,15 +835,15 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(31)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(32), @@ -869,15 +871,15 @@ func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) { func TestScheduler_Attester_Fetch_Execute_Next_Epoch_Duty(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(13)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(32), diff --git a/operator/duties/base_handler.go b/operator/duties/base_handler.go index 15303fef68..c3c22ebbe2 100644 --- a/operator/duties/base_handler.go +++ b/operator/duties/base_handler.go @@ -3,11 +3,11 @@ package duties import ( "context" - "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "go.uber.org/zap" "github.com/bloxapp/ssv/networkconfig" + 
"github.com/bloxapp/ssv/operator/slotticker" ) //go:generate mockgen -package=duties -destination=./base_handler_mock.go -source=./base_handler.go @@ -16,7 +16,7 @@ import ( type ExecuteDutiesFunc func(logger *zap.Logger, duties []*spectypes.Duty) type dutyHandler interface { - Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, chan phase0.Slot, chan ReorgEvent, chan struct{}) + Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, slotticker.Provider, chan ReorgEvent, chan struct{}) HandleDuties(context.Context) Name() string } @@ -27,7 +27,7 @@ type baseHandler struct { network networkconfig.NetworkConfig validatorController ValidatorController executeDuties ExecuteDutiesFunc - ticker chan phase0.Slot + ticker slotticker.SlotTicker reorg chan ReorgEvent indicesChange chan struct{} @@ -43,7 +43,7 @@ func (h *baseHandler) Setup( network networkconfig.NetworkConfig, validatorController ValidatorController, executeDuties ExecuteDutiesFunc, - ticker chan phase0.Slot, + slotTickerProvider slotticker.Provider, reorgEvents chan ReorgEvent, indicesChange chan struct{}, ) { @@ -52,28 +52,12 @@ func (h *baseHandler) Setup( h.network = network h.validatorController = validatorController h.executeDuties = executeDuties - h.ticker = ticker + h.ticker = slotTickerProvider() h.reorg = reorgEvents h.indicesChange = indicesChange } -type Duties[D any] struct { - m map[phase0.Epoch]map[phase0.Slot][]D -} - -func NewDuties[D any]() *Duties[D] { - return &Duties[D]{ - m: make(map[phase0.Epoch]map[phase0.Slot][]D), - } -} - -func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, duty D) { - if _, ok := d.m[epoch]; !ok { - d.m[epoch] = make(map[phase0.Slot][]D) - } - d.m[epoch][slot] = append(d.m[epoch][slot], duty) -} - -func (d *Duties[D]) Reset(epoch phase0.Epoch) { - delete(d.m, epoch) +func (h *baseHandler) warnMisalignedSlotAndDuty(dutyType string) { + h.logger.Debug("current slot and duty slot are not aligned, "+ + "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", dutyType)) } diff --git a/operator/duties/base_handler_mock.go b/operator/duties/base_handler_mock.go index 801ca2dc8c..6177f369f3 100644 --- a/operator/duties/base_handler_mock.go +++ b/operator/duties/base_handler_mock.go @@ -8,8 +8,8 @@ import ( context "context" reflect "reflect" - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" networkconfig "github.com/bloxapp/ssv/networkconfig" + slotticker "github.com/bloxapp/ssv/operator/slotticker" gomock "github.com/golang/mock/gomock" zap "go.uber.org/zap" ) @@ -64,7 +64,7 @@ func (mr *MockdutyHandlerMockRecorder) Name() *gomock.Call { } // Setup mocks base method. 
-func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 networkconfig.NetworkConfig, arg4 ValidatorController, arg5 ExecuteDutiesFunc, arg6 chan phase0.Slot, arg7 chan ReorgEvent, arg8 chan struct{}) { +func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 networkconfig.NetworkConfig, arg4 ValidatorController, arg5 ExecuteDutiesFunc, arg6 slotticker.Provider, arg7 chan ReorgEvent, arg8 chan struct{}) { m.ctrl.T.Helper() m.ctrl.Call(m, "Setup", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } diff --git a/operator/duties/dutystore/duties.go b/operator/duties/dutystore/duties.go new file mode 100644 index 0000000000..50fd0d7e22 --- /dev/null +++ b/operator/duties/dutystore/duties.go @@ -0,0 +1,97 @@ +package dutystore + +import ( + "sync" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +type Duty interface { + eth2apiv1.AttesterDuty | eth2apiv1.ProposerDuty | eth2apiv1.SyncCommitteeDuty +} + +type dutyDescriptor[D Duty] struct { + duty *D + inCommittee bool +} + +type Duties[D Duty] struct { + mu sync.RWMutex + m map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D] +} + +func NewDuties[D Duty]() *Duties[D] { + return &Duties[D]{ + m: make(map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]), + } +} + +func (d *Duties[D]) CommitteeSlotDuties(epoch phase0.Epoch, slot phase0.Slot) []*D { + d.mu.RLock() + defer d.mu.RUnlock() + + slotMap, ok := d.m[epoch] + if !ok { + return nil + } + + descriptorMap, ok := slotMap[slot] + if !ok { + return nil + } + + var duties []*D + for _, descriptor := range descriptorMap { + if descriptor.inCommittee { + duties = append(duties, descriptor.duty) + } + } + + return duties +} + +func (d *Duties[D]) ValidatorDuty(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex) *D { + d.mu.RLock() + defer d.mu.RUnlock() + + slotMap, ok := d.m[epoch] + if !ok { + return nil + } + + descriptorMap, ok := slotMap[slot] + if !ok { + return nil + } + + descriptor, ok := descriptorMap[validatorIndex] + if !ok { + return nil + } + + return descriptor.duty +} + +func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex, duty *D, inCommittee bool) { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.m[epoch]; !ok { + d.m[epoch] = make(map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]) + } + if _, ok := d.m[epoch][slot]; !ok { + d.m[epoch][slot] = make(map[phase0.ValidatorIndex]dutyDescriptor[D]) + } + d.m[epoch][slot][validatorIndex] = dutyDescriptor[D]{ + duty: duty, + inCommittee: inCommittee, + } +} + +func (d *Duties[D]) ResetEpoch(epoch phase0.Epoch) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.m, epoch) +} diff --git a/operator/duties/dutystore/store.go b/operator/duties/dutystore/store.go new file mode 100644 index 0000000000..53dbfaefcc --- /dev/null +++ b/operator/duties/dutystore/store.go @@ -0,0 +1,19 @@ +package dutystore + +import ( + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" +) + +type Store struct { + Attester *Duties[eth2apiv1.AttesterDuty] + Proposer *Duties[eth2apiv1.ProposerDuty] + SyncCommittee *SyncCommitteeDuties +} + +func New() *Store { + return &Store{ + Attester: NewDuties[eth2apiv1.AttesterDuty](), + Proposer: NewDuties[eth2apiv1.ProposerDuty](), + SyncCommittee: NewSyncCommitteeDuties(), + } +} diff --git a/operator/duties/dutystore/sync_committee.go 
b/operator/duties/dutystore/sync_committee.go new file mode 100644 index 0000000000..0ae13041c7 --- /dev/null +++ b/operator/duties/dutystore/sync_committee.go @@ -0,0 +1,76 @@ +package dutystore + +import ( + "sync" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +type SyncCommitteeDuties struct { + mu sync.RWMutex + m map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty] +} + +func NewSyncCommitteeDuties() *SyncCommitteeDuties { + return &SyncCommitteeDuties{ + m: make(map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]), + } +} + +func (d *SyncCommitteeDuties) CommitteePeriodDuties(period uint64) []*eth2apiv1.SyncCommitteeDuty { + d.mu.RLock() + defer d.mu.RUnlock() + + descriptorMap, ok := d.m[period] + if !ok { + return nil + } + + var duties []*eth2apiv1.SyncCommitteeDuty + for _, descriptor := range descriptorMap { + if descriptor.inCommittee { + duties = append(duties, descriptor.duty) + } + } + + return duties +} + +func (d *SyncCommitteeDuties) Duty(period uint64, validatorIndex phase0.ValidatorIndex) *eth2apiv1.SyncCommitteeDuty { + d.mu.RLock() + defer d.mu.RUnlock() + + duties, ok := d.m[period] + if !ok { + return nil + } + + descriptor, ok := duties[validatorIndex] + if !ok { + return nil + } + + return descriptor.duty +} + +func (d *SyncCommitteeDuties) Add(period uint64, validatorIndex phase0.ValidatorIndex, duty *eth2apiv1.SyncCommitteeDuty, inCommittee bool) { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.m[period]; !ok { + d.m[period] = make(map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]) + } + + d.m[period][validatorIndex] = dutyDescriptor[eth2apiv1.SyncCommitteeDuty]{ + duty: duty, + inCommittee: inCommittee, + } +} + +func (d *SyncCommitteeDuties) Reset(period uint64) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.m, period) +} diff --git a/operator/duties/mocks/scheduler.go b/operator/duties/mocks/scheduler.go index 00cd929622..7195d58dcd 100644 --- a/operator/duties/mocks/scheduler.go +++ b/operator/duties/mocks/scheduler.go @@ -7,13 +7,13 @@ package mocks import ( context "context" reflect "reflect" + time "time" client "github.com/attestantio/go-eth2-client" v1 "github.com/attestantio/go-eth2-client/api/v1" phase0 "github.com/attestantio/go-eth2-client/spec/phase0" types "github.com/bloxapp/ssv/protocol/v2/types" gomock "github.com/golang/mock/gomock" - event "github.com/prysmaticlabs/prysm/v4/async/event" ) // MockSlotTicker is a mock of SlotTicker interface. @@ -39,18 +39,32 @@ func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { return m.recorder } -// Subscribe mocks base method. -func (m *MockSlotTicker) Subscribe(subscription chan phase0.Slot) event.Subscription { +// Next mocks base method. +func (m *MockSlotTicker) Next() <-chan time.Time { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subscribe", subscription) - ret0, _ := ret[0].(event.Subscription) + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(<-chan time.Time) return ret0 } -// Subscribe indicates an expected call of Subscribe. -func (mr *MockSlotTickerMockRecorder) Subscribe(subscription interface{}) *gomock.Call { +// Next indicates an expected call of Next. 
+func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockSlotTicker)(nil).Subscribe), subscription) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) +} + +// Slot mocks base method. +func (m *MockSlotTicker) Slot() phase0.Slot { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Slot") + ret0, _ := ret[0].(phase0.Slot) + return ret0 +} + +// Slot indicates an expected call of Slot. +func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) } // MockBeaconNode is a mock of BeaconNode interface. @@ -186,18 +200,32 @@ func (m *MockValidatorController) EXPECT() *MockValidatorControllerMockRecorder return m.recorder } -// ActiveValidatorIndices mocks base method. -func (m *MockValidatorController) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { +// AllActiveIndices mocks base method. +func (m *MockValidatorController) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// AllActiveIndices indicates an expected call of AllActiveIndices. +func (mr *MockValidatorControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).AllActiveIndices), epoch) +} + +// CommitteeActiveIndices mocks base method. +func (m *MockValidatorController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveValidatorIndices", epoch) + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) ret0, _ := ret[0].([]phase0.ValidatorIndex) return ret0 } -// ActiveValidatorIndices indicates an expected call of ActiveValidatorIndices. -func (mr *MockValidatorControllerMockRecorder) ActiveValidatorIndices(epoch interface{}) *gomock.Call { +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. +func (mr *MockValidatorControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveValidatorIndices", reflect.TypeOf((*MockValidatorController)(nil).ActiveValidatorIndices), epoch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).CommitteeActiveIndices), epoch) } // GetOperatorShares mocks base method. 
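The dutystore package introduced above replaces the handler-local Duties map with a concurrency-safe store keyed by epoch, slot, and validator index. Below is a minimal usage sketch assembled from the duties.go and store.go definitions in this patch; the epoch, slot, and validator-index values are illustrative:

package main

import (
	"fmt"

	eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1"
	"github.com/attestantio/go-eth2-client/spec/phase0"

	"github.com/bloxapp/ssv/operator/duties/dutystore"
)

func main() {
	store := dutystore.New()

	// Index a fetched duty under (epoch, slot, validator index); the final
	// argument marks whether the validator belongs to one of our committees.
	duty := &eth2apiv1.AttesterDuty{
		Slot:           phase0.Slot(66),
		ValidatorIndex: phase0.ValidatorIndex(7),
	}
	store.Attester.Add(phase0.Epoch(2), duty.Slot, duty.ValidatorIndex, duty, true)

	// CommitteeSlotDuties returns only the in-committee duties for a slot.
	for _, d := range store.Attester.CommitteeSlotDuties(phase0.Epoch(2), phase0.Slot(66)) {
		fmt.Println("execute attester duty for validator", d.ValidatorIndex)
	}

	// ValidatorDuty looks up a single validator's duty (nil if absent).
	if d := store.Attester.ValidatorDuty(phase0.Epoch(2), phase0.Slot(66), phase0.ValidatorIndex(7)); d != nil {
		fmt.Println("found duty at slot", d.Slot)
	}

	// ResetEpoch drops everything cached for an epoch, e.g. after a reorg.
	store.Attester.ResetEpoch(phase0.Epoch(2))
}

The per-duty inCommittee flag is what lets the proposer handler below fetch proposer duties for all active validators while executing only the in-committee ones.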
diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go index 60fde29186..d65b25b0e1 100644 --- a/operator/duties/proposer.go +++ b/operator/duties/proposer.go @@ -11,17 +11,18 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) type ProposerHandler struct { baseHandler - duties *Duties[*eth2apiv1.ProposerDuty] + duties *dutystore.Duties[eth2apiv1.ProposerDuty] } -func NewProposerHandler() *ProposerHandler { +func NewProposerHandler(duties *dutystore.Duties[eth2apiv1.ProposerDuty]) *ProposerHandler { return &ProposerHandler{ - duties: NewDuties[*eth2apiv1.ProposerDuty](), + duties: duties, baseHandler: baseHandler{ fetchFirst: true, }, @@ -44,7 +45,7 @@ func (h *ProposerHandler) Name() string { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current epoch. +// 2. Reset the stored duties for the current epoch. // 3. Fetch duties for the current epoch. // // On Ticker event: @@ -58,7 +59,8 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_seq", buildStr)) @@ -71,7 +73,6 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { } else { h.processExecution(currentEpoch, slot) if h.indicesChanged { - h.duties.Reset(currentEpoch) h.indicesChanged = false h.processFetching(ctx, currentEpoch, slot) } @@ -79,7 +80,7 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { // last slot of epoch if uint64(slot)%h.network.Beacon.SlotsPerEpoch() == h.network.Beacon.SlotsPerEpoch()-1 { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch - 1) h.fetchFirst = true } @@ -90,7 +91,7 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { // reset current epoch duties if reorgEvent.Current { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.fetchFirst = true } @@ -116,36 +117,46 @@ func (h *ProposerHandler) processFetching(ctx context.Context, epoch phase0.Epoc } func (h *ProposerHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) { + duties := h.duties.CommitteeSlotDuties(epoch, slot) + if duties == nil { + return + } + // range over duties and execute - if slotMap, ok := h.duties.m[epoch]; ok { - if duties, ok := slotMap[slot]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)) - for _, d := range duties { - if h.shouldExecute(d) { - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer)) - } - } - h.executeDuties(h.logger, toExecute) + toExecute := make([]*spectypes.Duty, 0, len(duties)) + for _, d := range duties { + if h.shouldExecute(d) { + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer)) } } + h.executeDuties(h.logger, toExecute) } func (h *ProposerHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error { start := time.Now() - indices := h.validatorController.ActiveValidatorIndices(epoch) - if len(indices) == 0 { + allIndices := h.validatorController.AllActiveIndices(epoch) + if len(allIndices) == 0 { return nil } - duties, err := h.beaconNode.ProposerDuties(ctx, epoch, indices) + inCommitteeIndices := h.validatorController.CommitteeActiveIndices(epoch) + inCommitteeIndicesSet := map[phase0.ValidatorIndex]struct{}{} + for _, idx := range inCommitteeIndices 
{ + inCommitteeIndicesSet[idx] = struct{}{} + } + + duties, err := h.beaconNode.ProposerDuties(ctx, epoch, allIndices) if err != nil { return fmt.Errorf("failed to fetch proposer duties: %w", err) } + h.duties.ResetEpoch(epoch) + specDuties := make([]*spectypes.Duty, 0, len(duties)) for _, d := range duties { - h.duties.Add(epoch, d.Slot, d) + _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] + h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, inCommitteeDuty) specDuties = append(specDuties, h.toSpecDuty(d, spectypes.BNRoleProposer)) } @@ -174,8 +185,7 @@ func (h *ProposerHandler) shouldExecute(duty *eth2apiv1.ProposerDuty) bool { return true } if currentSlot+1 == duty.Slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/proposer_test.go b/operator/duties/proposer_test.go index 8df730b6d3..56860c3c0e 100644 --- a/operator/duties/proposer_test.go +++ b/operator/duties/proposer_test.go @@ -4,48 +4,50 @@ import ( "context" "testing" - v1 "github.com/attestantio/go-eth2-client/api/v1" + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" ) -func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*v1.ProposerDuty]) (chan struct{}, chan []*spectypes.Duty) { +func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.ProposerDuty]) (chan struct{}, chan []*spectypes.Duty) { fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.Duty) s.beaconNode.(*mocks.MockBeaconNode).EXPECT().ProposerDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.ProposerDuty, error) { + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.ProposerDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getIndices := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - duties, _ := dutiesMap.Get(epoch) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() + 
s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() return fetchDutiesCall, executeDutiesCall } -func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*v1.ProposerDuty) []*spectypes.Duty { +func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*eth2apiv1.ProposerDuty) []*spectypes.Duty { expectedDuties := make([]*spectypes.Duty, 0) for _, d := range duties { expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleProposer)) @@ -55,15 +57,15 @@ func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*v1.Propo func TestScheduler_Proposer_Same_Slot(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(0), @@ -87,15 +89,15 @@ func TestScheduler_Proposer_Same_Slot(t *testing.T) { func TestScheduler_Proposer_Diff_Slots(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -129,9 +131,9 @@ func TestScheduler_Proposer_Diff_Slots(t *testing.T) { // execute duty after two slots after the indices changed func TestScheduler_Proposer_Indices_Changed(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -148,7 +150,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { // STEP 3: trigger a change in active indices scheduler.indicesChg <- struct{}{} - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(1), @@ -178,7 +180,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { // STEP 4: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[2]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[2]}) 
setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	ticker.Send(currentSlot.GetSlot())
@@ -191,15 +193,15 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) {
 
 func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	var (
-		handler     = NewProposerHandler()
+		handler     = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(0))
 	scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{
+	dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(2),
@@ -215,7 +217,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	scheduler.indicesChg <- struct{}{}
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 	duties, _ := dutiesMap.Get(phase0.Epoch(0))
-	dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.ProposerDuty{
+	dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.ProposerDuty{
 		PubKey:         phase0.BLSPubKey{1, 2, 4},
 		Slot:           phase0.Slot(3),
 		ValidatorIndex: phase0.ValidatorIndex(2),
@@ -225,7 +227,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	scheduler.indicesChg <- struct{}{}
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 	duties, _ = dutiesMap.Get(phase0.Epoch(0))
-	dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.ProposerDuty{
+	dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.ProposerDuty{
 		PubKey:         phase0.BLSPubKey{1, 2, 5},
 		Slot:           phase0.Slot(4),
 		ValidatorIndex: phase0.ValidatorIndex(3),
@@ -239,7 +241,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	// STEP 5: wait for proposer duties to be executed
 	currentSlot.SetSlot(phase0.Slot(2))
 	duties, _ = dutiesMap.Get(phase0.Epoch(0))
-	expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[0]})
+	expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[0]})
 	setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	ticker.Send(currentSlot.GetSlot())
@@ -248,7 +250,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	// STEP 6: wait for proposer duties to be executed
 	currentSlot.SetSlot(phase0.Slot(3))
 	duties, _ = dutiesMap.Get(phase0.Epoch(0))
-	expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[1]})
+	expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[1]})
 	setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	ticker.Send(currentSlot.GetSlot())
@@ -257,7 +259,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 	// STEP 7: wait for proposer duties to be executed
 	currentSlot.SetSlot(phase0.Slot(4))
 	duties, _ = dutiesMap.Get(phase0.Epoch(0))
-	expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[2]})
+	expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[2]})
 	setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	ticker.Send(currentSlot.GetSlot())
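The tests above now inject a shared duty store instead of letting each handler build its own map. The dutystore package itself is not part of this patch, so the sketch below infers its method set purely from the call sites visible here (NewDuties, Add, CommitteeSlotDuties, ResetEpoch); treat the signatures as assumptions rather than the package's documented API:

```go
package main

import (
	eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1"
	"github.com/attestantio/go-eth2-client/spec/phase0"

	"github.com/bloxapp/ssv/operator/duties/dutystore"
)

func main() {
	// One store per duty type; handlers receive it via their constructors.
	store := dutystore.NewDuties[eth2apiv1.ProposerDuty]()

	// Fetch path: record a duty, flagging whether the validator is in-committee.
	d := &eth2apiv1.ProposerDuty{Slot: phase0.Slot(1), ValidatorIndex: phase0.ValidatorIndex(42)}
	store.Add(phase0.Epoch(0), d.Slot, d.ValidatorIndex, d, true)

	// Execution path: read back everything stored for (epoch, slot).
	_ = store.CommitteeSlotDuties(phase0.Epoch(0), phase0.Slot(1))

	// Cleanup path: drop a whole epoch once it can no longer be executed.
	store.ResetEpoch(phase0.Epoch(0))
}
```

Injecting the store also lets the scheduler share one instance between its handlers and other consumers; see the dutystore.Store wiring in scheduler.go below.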
@@ -271,15 +273,15 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) {
 
 // reorg current dependent root changed
 func TestScheduler_Proposer_Reorg_Current(t *testing.T) {
 	var (
-		handler     = NewProposerHandler()
+		handler     = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(34))
 	scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(36),
@@ -292,8 +294,8 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) {
 	waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 2: trigger head event
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                     currentSlot.GetSlot(),
 			CurrentDutyDependentRoot: phase0.Root{0x01},
 		},
@@ -307,13 +309,13 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) {
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 4: trigger reorg
-	e = &v1.Event{
-		Data: &v1.HeadEvent{
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                     currentSlot.GetSlot(),
 			CurrentDutyDependentRoot: phase0.Root{0x02},
 		},
 	}
-	dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(37),
@@ -346,15 +348,15 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) {
 
 // reorg current dependent root changed
 func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) {
 	var (
-		handler     = NewProposerHandler()
+		handler     = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]())
 		currentSlot = &SlotValue{}
-		dutiesMap   = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]()
+		dutiesMap   = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]()
 	)
 	currentSlot.SetSlot(phase0.Slot(34))
 	scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot)
 	fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap)
 
-	dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(36),
@@ -367,8 +369,8 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) {
 	waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 2: trigger head event
-	e := &v1.Event{
-		Data: &v1.HeadEvent{
+	e := &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                     currentSlot.GetSlot(),
 			CurrentDutyDependentRoot: phase0.Root{0x01},
 		},
@@ -382,13 +384,13 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) {
 	waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout)
 
 	// STEP 4: trigger reorg
-	e = &v1.Event{
-		Data: &v1.HeadEvent{
+	e = &eth2apiv1.Event{
+		Data: &eth2apiv1.HeadEvent{
 			Slot:                     currentSlot.GetSlot(),
 			CurrentDutyDependentRoot: phase0.Root{0x02},
 		},
 	}
-	dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{
+	dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{
 		{
 			PubKey:         phase0.BLSPubKey{1, 2, 3},
 			Slot:           phase0.Slot(37),
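Both reorg tests share one trigger worth spelling out: a second head event for the same slot whose CurrentDutyDependentRoot differs from the first is what the scheduler treats as a reorg of current-epoch duties. A minimal sketch of the event the tests feed in; the types are the real go-eth2-client ones, while the helper name is made up for illustration:

```go
import (
	eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1"
	"github.com/attestantio/go-eth2-client/spec/phase0"
)

// headEvent builds the beacon head event the tests send to the scheduler.
// Sending two of these for the same slot with different roots simulates
// a reorg of the current epoch's proposer assignments.
func headEvent(slot phase0.Slot, root phase0.Root) *eth2apiv1.Event {
	return &eth2apiv1.Event{
		Data: &eth2apiv1.HeadEvent{
			Slot:                     slot,
			CurrentDutyDependentRoot: root,
		},
	}
}
```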
@@ -401,7 +403,7 @@
 	// STEP 5: trigger a change in active indices in the same slot
 	scheduler.indicesChg <- struct{}{}
 	duties, _ := dutiesMap.Get(phase0.Epoch(1))
-	dutiesMap.Set(phase0.Epoch(1), append(duties, &v1.ProposerDuty{
+	dutiesMap.Set(phase0.Epoch(1), append(duties, &eth2apiv1.ProposerDuty{
 		PubKey:         phase0.BLSPubKey{1, 2, 4},
 		Slot:           phase0.Slot(38),
 		ValidatorIndex: phase0.ValidatorIndex(2),
@@ -417,7 +419,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) {
 	// STEP 7: The second assigned duty should be executed
 	currentSlot.SetSlot(phase0.Slot(37))
 	duties, _ = dutiesMap.Get(phase0.Epoch(1))
-	expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[0]})
+	expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[0]})
 	setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	ticker.Send(currentSlot.GetSlot())
@@ -426,7 +428,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) {
 	// STEP 8: The second assigned duty should be executed
 	currentSlot.SetSlot(phase0.Slot(38))
 	duties, _ = dutiesMap.Get(phase0.Epoch(1))
-	expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[1]})
+	expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[1]})
 	setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected))
 
 	ticker.Send(currentSlot.GetSlot())
diff --git a/operator/duties/scheduler.go b/operator/duties/scheduler.go
index cb1f5861c6..0ee6979ff8 100644
--- a/operator/duties/scheduler.go
+++ b/operator/duties/scheduler.go
@@ -11,6 +11,8 @@ import (
 	eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1"
 	"github.com/attestantio/go-eth2-client/spec/phase0"
 	spectypes "github.com/bloxapp/ssv-spec/types"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prysmaticlabs/prysm/v4/async/event"
 	"github.com/sourcegraph/conc/pool"
 	"go.uber.org/zap"
@@ -19,11 +21,26 @@ import (
 	"github.com/bloxapp/ssv/logging"
 	"github.com/bloxapp/ssv/logging/fields"
 	"github.com/bloxapp/ssv/networkconfig"
+	"github.com/bloxapp/ssv/operator/duties/dutystore"
+	"github.com/bloxapp/ssv/operator/slotticker"
 	"github.com/bloxapp/ssv/protocol/v2/types"
 )
 
 //go:generate mockgen -package=mocks -destination=./mocks/scheduler.go -source=./scheduler.go
 
+var slotDelayHistogram = promauto.NewHistogram(prometheus.HistogramOpts{
+	Name:    "slot_ticker_delay_milliseconds",
+	Help:    "The delay in milliseconds of the slot ticker",
+	Buckets: []float64{5, 10, 20, 100, 500, 5000}, // Buckets in milliseconds. Adjust as per your needs.
+})
+
+func init() {
+	logger := zap.L()
+	if err := prometheus.Register(slotDelayHistogram); err != nil {
+		logger.Debug("could not register prometheus collector")
+	}
+}
+
 const (
 	// blockPropagationDelay time to propagate around the nodes
 	// before kicking off duties for the block's slot.
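A note on the metric just added: promauto.NewHistogram already registers the collector with prometheus.DefaultRegisterer (and panics if that fails), so the explicit prometheus.Register call in init should return prometheus.AlreadyRegisteredError on every start, which is presumably why the failure is only logged at Debug. If the second registration is kept as a guard, a sketch that ignores the expected duplicate while keeping genuine failures visible might look like this (not part of the patch; registerCollector is a hypothetical helper):

```go
import (
	"errors"

	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"
)

// registerCollector swallows the duplicate-registration error that promauto
// produces here, but still surfaces any genuine registration failure.
func registerCollector(c prometheus.Collector, logger *zap.Logger) {
	if err := prometheus.Register(c); err != nil {
		var are prometheus.AlreadyRegisteredError
		if errors.As(err, &are) {
			return // already registered by promauto at construction time
		}
		logger.Warn("could not register prometheus collector", zap.Error(err))
	}
}
```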
@@ -31,7 +48,8 @@ const ( ) type SlotTicker interface { - Subscribe(subscription chan phase0.Slot) event.Subscription + Next() <-chan time.Time + Slot() phase0.Slot } type BeaconNode interface { @@ -45,7 +63,8 @@ type BeaconNode interface { // ValidatorController represents the component that controls validators via the scheduler type ValidatorController interface { - ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex GetOperatorShares() []*types.SSVShare } @@ -58,15 +77,16 @@ type SchedulerOptions struct { ValidatorController ValidatorController ExecuteDuty ExecuteDutyFunc IndicesChg chan struct{} - Ticker SlotTicker + SlotTickerProvider slotticker.Provider BuilderProposals bool + DutyStore *dutystore.Store } type Scheduler struct { beaconNode BeaconNode network networkconfig.NetworkConfig validatorController ValidatorController - slotTicker SlotTicker + slotTickerProvider slotticker.Provider executeDuty ExecuteDutyFunc builderProposals bool @@ -75,7 +95,7 @@ type Scheduler struct { reorg chan ReorgEvent indicesChg chan struct{} - ticker chan phase0.Slot + ticker slotticker.SlotTicker waitCond *sync.Cond pool *pool.ContextPool @@ -86,10 +106,15 @@ type Scheduler struct { } func NewScheduler(opts *SchedulerOptions) *Scheduler { + dutyStore := opts.DutyStore + if dutyStore == nil { + dutyStore = dutystore.New() + } + s := &Scheduler{ beaconNode: opts.BeaconNode, network: opts.Network, - slotTicker: opts.Ticker, + slotTickerProvider: opts.SlotTickerProvider, executeDuty: opts.ExecuteDuty, validatorController: opts.ValidatorController, builderProposals: opts.BuilderProposals, @@ -97,12 +122,12 @@ func NewScheduler(opts *SchedulerOptions) *Scheduler { blockPropagateDelay: blockPropagationDelay, handlers: []dutyHandler{ - NewAttesterHandler(), - NewProposerHandler(), - NewSyncCommitteeHandler(), + NewAttesterHandler(dutyStore.Attester), + NewProposerHandler(dutyStore.Proposer), + NewSyncCommitteeHandler(dutyStore.SyncCommittee), }, - ticker: make(chan phase0.Slot), + ticker: opts.SlotTickerProvider(), reorg: make(chan ReorgEvent), waitCond: sync.NewCond(&sync.Mutex{}), } @@ -135,8 +160,6 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { for _, handler := range s.handlers { handler := handler - slotTicker := make(chan phase0.Slot) - s.slotTicker.Subscribe(slotTicker) indicesChangeCh := make(chan struct{}) indicesChangeFeed.Subscribe(indicesChangeCh) @@ -150,7 +173,7 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { s.network, s.validatorController, s.ExecuteDuties, - slotTicker, + s.slotTickerProvider, reorgCh, indicesChangeCh, ) @@ -162,7 +185,6 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { }) } - s.slotTicker.Subscribe(s.ticker) go s.SlotTicker(ctx) go indicesChangeFeed.FanOut(ctx, s.indicesChg) @@ -214,7 +236,9 @@ func (s *Scheduler) SlotTicker(ctx context.Context) { select { case <-ctx.Done(): return - case slot := <-s.ticker: + case <-s.ticker.Next(): + slot := s.ticker.Slot() + delay := s.network.SlotDurationSec() / time.Duration(goclient.IntervalsPerSlot) /* a third of the slot duration */ finalTime := s.network.Beacon.GetSlotStartTime(slot).Add(delay) waitDuration := time.Until(finalTime) @@ -322,6 +346,11 @@ func (s *Scheduler) ExecuteDuties(logger *zap.Logger, duties []*spectypes.Duty) for _, duty := range duties { duty := duty logger := 
s.loggerWithDutyContext(logger, duty) + slotDelay := time.Since(s.network.Beacon.GetSlotStartTime(duty.Slot)) + if slotDelay >= 100*time.Millisecond { + logger.Debug("⚠️ late duty execution", zap.Int64("slot_delay", slotDelay.Milliseconds())) + } + slotDelayHistogram.Observe(float64(slotDelay.Milliseconds())) go func() { if duty.Type == spectypes.BNRoleAttester || duty.Type == spectypes.BNRoleSyncCommittee { s.waitOneThirdOrValidBlock(duty.Slot) diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index 342ba9e0cd..3a98de7e7c 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -17,35 +17,80 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties/mocks" - mockslotticker "github.com/bloxapp/ssv/operator/slot_ticker/mocks" + "github.com/bloxapp/ssv/operator/slotticker" + mockslotticker "github.com/bloxapp/ssv/operator/slotticker/mocks" mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" ) +type MockSlotTicker interface { + Next() <-chan time.Time + Slot() phase0.Slot + Subscribe() chan phase0.Slot +} + type mockSlotTicker struct { - event.Feed + slotChan chan phase0.Slot + timeChan chan time.Time + slot phase0.Slot + mu sync.Mutex +} + +func NewMockSlotTicker() MockSlotTicker { + ticker := &mockSlotTicker{ + slotChan: make(chan phase0.Slot), + timeChan: make(chan time.Time), + } + ticker.start() + return ticker +} + +func (m *mockSlotTicker) start() { + go func() { + for slot := range m.slotChan { + m.mu.Lock() + m.slot = slot + m.mu.Unlock() + m.timeChan <- time.Now() + } + }() +} + +func (m *mockSlotTicker) Next() <-chan time.Time { + return m.timeChan +} + +func (m *mockSlotTicker) Slot() phase0.Slot { + m.mu.Lock() + defer m.mu.Unlock() + return m.slot +} + +func (m *mockSlotTicker) Subscribe() chan phase0.Slot { + return m.slotChan } -func (m *mockSlotTicker) Subscribe(subscriber chan phase0.Slot) event.Subscription { - return m.Feed.Subscribe(subscriber) +type mockSlotTickerService struct { + event.Feed } func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *SlotValue) ( *Scheduler, *zap.Logger, - *mockSlotTicker, + *mockSlotTickerService, time.Duration, context.CancelFunc, *pool.ContextPool, ) { ctrl := gomock.NewController(t) - timeout := 100 * time.Millisecond + // A 200ms timeout ensures the test passes, even with mockSlotTicker overhead. 
+ timeout := 200 * time.Millisecond ctx, cancel := context.WithCancel(context.Background()) logger := logging.TestLogger(t) mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := &mockSlotTicker{} + mockSlotService := &mockSlotTickerService{} mockNetworkConfig := networkconfig.NetworkConfig{ Beacon: mocknetwork.NewMockBeaconNetwork(ctrl), } @@ -55,8 +100,12 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot BeaconNode: mockBeaconNode, Network: mockNetworkConfig, ValidatorController: mockValidatorController, - Ticker: mockTicker, - BuilderProposals: false, + SlotTickerProvider: func() slotticker.SlotTicker { + ticker := NewMockSlotTicker() + mockSlotService.Subscribe(ticker.Subscribe()) + return ticker + }, + BuilderProposals: false, } s := NewScheduler(opts) @@ -103,7 +152,7 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot return s.Wait() }) - return s, logger, mockTicker, timeout, cancel, schedulerPool + return s, logger, mockSlotService, timeout, cancel, schedulerPool } func setExecuteDutyFunc(s *Scheduler, executeDutiesCall chan []*spectypes.Duty, executeDutiesCallSize int) { @@ -199,7 +248,7 @@ func TestScheduler_Run(t *testing.T) { mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := mockslotticker.NewMockTicker(ctrl) + mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers mockDutyHandler1 := NewMockdutyHandler(ctrl) mockDutyHandler2 := NewMockdutyHandler(ctrl) @@ -209,8 +258,10 @@ func TestScheduler_Run(t *testing.T) { BeaconNode: mockBeaconNode, Network: networkconfig.TestNetwork, ValidatorController: mockValidatorController, - Ticker: mockTicker, BuilderProposals: false, + SlotTickerProvider: func() slotticker.SlotTicker { + return mockTicker + }, } s := NewScheduler(opts) @@ -218,7 +269,7 @@ func TestScheduler_Run(t *testing.T) { s.handlers = []dutyHandler{mockDutyHandler1, mockDutyHandler2} mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + mockTicker.EXPECT().Next().Return(nil).AnyTimes() // setup mock duty handler expectations for _, mockDutyHandler := range s.handlers { @@ -248,7 +299,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := mockslotticker.NewMockTicker(ctrl) + mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers opts := &SchedulerOptions{ @@ -256,8 +307,10 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { BeaconNode: mockBeaconNode, Network: networkconfig.TestNetwork, ValidatorController: mockValidatorController, - Ticker: mockTicker, - IndicesChg: make(chan struct{}), + SlotTickerProvider: func() slotticker.SlotTicker { + return mockTicker + }, + IndicesChg: make(chan struct{}), BuilderProposals: true, } @@ -267,7 +320,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { // add multiple mock duty handlers s.handlers = []dutyHandler{NewValidatorRegistrationHandler()} mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + 
mockTicker.EXPECT().Next().Return(nil).AnyTimes()
 
 	err := s.Start(ctx, logger)
 	require.NoError(t, err)
diff --git a/operator/duties/synccommittee.go b/operator/duties/sync_committee.go
similarity index 84%
rename from operator/duties/synccommittee.go
rename to operator/duties/sync_committee.go
index 0569d7cbfd..03c2e60037 100644
--- a/operator/duties/synccommittee.go
+++ b/operator/duties/sync_committee.go
@@ -12,6 +12,7 @@ import (
 	"go.uber.org/zap"
 
 	"github.com/bloxapp/ssv/logging/fields"
+	"github.com/bloxapp/ssv/operator/duties/dutystore"
 )
 
 // syncCommitteePreparationEpochs is the number of epochs ahead of the sync committee
@@ -21,14 +22,14 @@ var syncCommitteePreparationEpochs = uint64(2)
 
 type SyncCommitteeHandler struct {
 	baseHandler
 
-	duties             *SyncCommitteeDuties
+	duties             *dutystore.SyncCommitteeDuties
 	fetchCurrentPeriod bool
 	fetchNextPeriod    bool
 }
 
-func NewSyncCommitteeHandler() *SyncCommitteeHandler {
+func NewSyncCommitteeHandler(duties *dutystore.SyncCommitteeDuties) *SyncCommitteeHandler {
 	h := &SyncCommitteeHandler{
-		duties: NewSyncCommitteeDuties(),
+		duties: duties,
 	}
 	h.fetchCurrentPeriod = true
 	h.fetchFirst = true
@@ -39,27 +40,6 @@ func (h *SyncCommitteeHandler) Name() string {
 	return spectypes.BNRoleSyncCommittee.String()
 }
 
-type SyncCommitteeDuties struct {
-	m map[uint64][]*eth2apiv1.SyncCommitteeDuty
-}
-
-func NewSyncCommitteeDuties() *SyncCommitteeDuties {
-	return &SyncCommitteeDuties{
-		m: make(map[uint64][]*eth2apiv1.SyncCommitteeDuty),
-	}
-}
-
-func (d *SyncCommitteeDuties) Add(period uint64, duty *eth2apiv1.SyncCommitteeDuty) {
-	if _, ok := d.m[period]; !ok {
-		d.m[period] = []*eth2apiv1.SyncCommitteeDuty{}
-	}
-	d.m[period] = append(d.m[period], duty)
-}
-
-func (d *SyncCommitteeDuties) Reset(period uint64) {
-	delete(d.m, period)
-}
-
 // HandleDuties manages the duty lifecycle, handling different cases:
 //
 // On First Run:
@@ -73,7 +53,7 @@ func (d *SyncCommitteeDuties) Reset(period uint64) {
 //
 // On Indices Change:
 // 1. Execute duties.
 // 2. Reset duties for the current period.
 // 3. Fetch duties for the current period.
 // 4. If necessary, fetch duties for the next period.
// @@ -92,7 +72,8 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) period := h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) buildStr := fmt.Sprintf("p%v-%v-s%v-#%v", period, epoch, slot, slot%32+1) @@ -100,15 +81,10 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { if h.fetchFirst { h.fetchFirst = false - h.indicesChanged = false h.processFetching(ctx, period, slot) h.processExecution(period, slot) } else { h.processExecution(period, slot) - if h.indicesChanged { - h.duties.Reset(period) - h.indicesChanged = false - } h.processFetching(ctx, period, slot) } @@ -123,7 +99,7 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { // last slot of period if slot == h.network.Beacon.LastSlotOfSyncPeriod(period) { - h.duties.Reset(period) + h.duties.Reset(period - 1) } case reorgEvent := <-h.reorg: @@ -146,12 +122,10 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { buildStr := fmt.Sprintf("p%v-e%v-s%v-#%v", period, epoch, slot, slot%32+1) h.logger.Info("🔁 indices change received", zap.String("period_epoch_slot_seq", buildStr)) - h.indicesChanged = true h.fetchCurrentPeriod = true // reset next period duties if in appropriate slot range if h.shouldFetchNextPeriod(slot) { - h.duties.Reset(period + 1) h.fetchNextPeriod = true } } @@ -181,16 +155,19 @@ func (h *SyncCommitteeHandler) processFetching(ctx context.Context, period uint6 func (h *SyncCommitteeHandler) processExecution(period uint64, slot phase0.Slot) { // range over duties and execute - if duties, ok := h.duties.m[period]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)*2) - for _, d := range duties { - if h.shouldExecute(d, slot) { - toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) - toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) - } + duties := h.duties.CommitteePeriodDuties(period) + if duties == nil { + return + } + + toExecute := make([]*spectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d, slot) { + toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) + toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) } - h.executeDuties(h.logger, toExecute) } + h.executeDuties(h.logger, toExecute) } func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period uint64) error { @@ -202,19 +179,26 @@ func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period } lastEpoch := h.network.Beacon.FirstEpochOfSyncPeriod(period+1) - 1 - indices := h.validatorController.ActiveValidatorIndices(firstEpoch) - - if len(indices) == 0 { + allActiveIndices := h.validatorController.AllActiveIndices(firstEpoch) + if len(allActiveIndices) == 0 { return nil } - duties, err := h.beaconNode.SyncCommitteeDuties(ctx, firstEpoch, indices) + inCommitteeIndices := h.validatorController.CommitteeActiveIndices(firstEpoch) + inCommitteeIndicesSet := map[phase0.ValidatorIndex]struct{}{} + for _, idx := range inCommitteeIndices { + inCommitteeIndicesSet[idx] = struct{}{} + } + + duties, err := h.beaconNode.SyncCommitteeDuties(ctx, firstEpoch, allActiveIndices) if err != nil { return fmt.Errorf("failed to fetch sync committee duties: %w", err) } + h.duties.Reset(period) for _, d := range duties { - 
h.duties.Add(period, d) + _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] + h.duties.Add(period, d.ValidatorIndex, d, inCommitteeDuty) } h.prepareDutiesResultLog(period, duties, start) @@ -276,8 +260,7 @@ func (h *SyncCommitteeHandler) shouldExecute(duty *eth2apiv1.SyncCommitteeDuty, return true } if currentSlot+1 == slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/synccommittee_test.go b/operator/duties/sync_committee_test.go similarity index 94% rename from operator/duties/synccommittee_test.go rename to operator/duties/sync_committee_test.go index 774cc2c2a5..b2ec6d5d8b 100644 --- a/operator/duties/synccommittee_test.go +++ b/operator/duties/sync_committee_test.go @@ -12,6 +12,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" ) @@ -55,23 +56,24 @@ func setupSyncCommitteeDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[uint64, [ return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getDuties := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - period := s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) - duties, _ := dutiesMap.Get(period) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + period := s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) + duties, _ := dutiesMap.Get(period) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getDuties).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getDuties).AnyTimes() s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() @@ -89,7 +91,7 @@ func expectedExecutedSyncCommitteeDuties(handler *SyncCommitteeHandler, duties [ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -148,7 +150,7 @@ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { func TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -215,7 +217,7 @@ func 
TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -269,7 +271,7 @@ func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -327,7 +329,7 @@ func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T // reorg current dependent root changed func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -399,7 +401,7 @@ func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { // reorg current dependent root changed including indices change in the same slot func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -479,7 +481,7 @@ func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { func TestScheduler_SyncCommittee_Early_Block(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index 2ac3a49ea3..e8b6b79210 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -6,22 +6,16 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "go.uber.org/zap" - - "github.com/bloxapp/ssv/logging/fields" ) const validatorRegistrationEpochInterval = uint64(10) type ValidatorRegistrationHandler struct { baseHandler - - validatorsPassedFirstRegistration map[string]struct{} } func NewValidatorRegistrationHandler() *ValidatorRegistrationHandler { - return &ValidatorRegistrationHandler{ - validatorsPassedFirstRegistration: map[string]struct{}{}, - } + return &ValidatorRegistrationHandler{} } func (h *ValidatorRegistrationHandler) Name() string { @@ -36,21 +30,19 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() shares := h.validatorController.GetOperatorShares() - sent := 0 + validators := []phase0.ValidatorIndex{} for _, share := range shares { - if !share.HasBeaconMetadata() { + if !share.HasBeaconMetadata() || !share.BeaconMetadata.IsAttesting() { continue } // if not passed first registration, should be registered within one epoch time in a corresponding slot // if passed first registration, should be registered within validatorRegistrationEpochInterval epochs time in a corresponding slot - registrationSlotInterval := h.network.SlotsPerEpoch() - if _, ok := 
h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)]; ok { - registrationSlotInterval *= validatorRegistrationEpochInterval - } + registrationSlotInterval := h.network.SlotsPerEpoch() * validatorRegistrationEpochInterval if uint64(share.BeaconMetadata.Index)%registrationSlotInterval != uint64(slot)%registrationSlotInterval { continue @@ -66,10 +58,11 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { // no need for other params }}) - sent++ - h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)] = struct{}{} + validators = append(validators, share.BeaconMetadata.Index) } - h.logger.Debug("validator registration duties sent", zap.Uint64("slot", uint64(slot)), fields.Count(sent)) + h.logger.Debug("validator registration duties sent", + zap.Uint64("slot", uint64(slot)), + zap.Any("validators", validators)) case <-h.indicesChange: continue diff --git a/operator/fee_recipient/controller.go b/operator/fee_recipient/controller.go index 477b40eed1..d44f20caca 100644 --- a/operator/fee_recipient/controller.go +++ b/operator/fee_recipient/controller.go @@ -10,7 +10,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/bloxapp/ssv/registry/storage" @@ -25,42 +25,40 @@ type RecipientController interface { // ControllerOptions holds the needed dependencies type ControllerOptions struct { - Ctx context.Context - BeaconClient beaconprotocol.BeaconNode - Network networkconfig.NetworkConfig - ShareStorage storage.Shares - RecipientStorage storage.Recipients - Ticker slot_ticker.Ticker - OperatorData *storage.OperatorData + Ctx context.Context + BeaconClient beaconprotocol.BeaconNode + Network networkconfig.NetworkConfig + ShareStorage storage.Shares + RecipientStorage storage.Recipients + SlotTickerProvider slotticker.Provider + OperatorData *storage.OperatorData } // recipientController implementation of RecipientController type recipientController struct { - ctx context.Context - beaconClient beaconprotocol.BeaconNode - network networkconfig.NetworkConfig - shareStorage storage.Shares - recipientStorage storage.Recipients - ticker slot_ticker.Ticker - operatorData *storage.OperatorData + ctx context.Context + beaconClient beaconprotocol.BeaconNode + network networkconfig.NetworkConfig + shareStorage storage.Shares + recipientStorage storage.Recipients + slotTickerProvider slotticker.Provider + operatorData *storage.OperatorData } func NewController(opts *ControllerOptions) *recipientController { return &recipientController{ - ctx: opts.Ctx, - beaconClient: opts.BeaconClient, - network: opts.Network, - shareStorage: opts.ShareStorage, - recipientStorage: opts.RecipientStorage, - ticker: opts.Ticker, - operatorData: opts.OperatorData, + ctx: opts.Ctx, + beaconClient: opts.BeaconClient, + network: opts.Network, + shareStorage: opts.ShareStorage, + recipientStorage: opts.RecipientStorage, + slotTickerProvider: opts.SlotTickerProvider, + operatorData: opts.OperatorData, } } func (rc *recipientController) Start(logger *zap.Logger) { - tickerChan := make(chan phase0.Slot, 32) - rc.ticker.Subscribe(tickerChan) - rc.listenToTicker(logger, tickerChan) + rc.listenToTicker(logger) } // listenToTicker loop over the given slot channel @@ -68,16 +66,19 @@ func (rc *recipientController) Start(logger *zap.Logger) { // in addition, 
submitting "same data" every slot is not efficient and can overload beacon node // instead we can subscribe to beacon node events and submit only when there is // a new fee recipient event (or new validator) was handled or when there is a syncing issue with beacon node -func (rc *recipientController) listenToTicker(logger *zap.Logger, slots chan phase0.Slot) { +func (rc *recipientController) listenToTicker(logger *zap.Logger) { firstTimeSubmitted := false - for currentSlot := range slots { + ticker := rc.slotTickerProvider() + for { + <-ticker.Next() + slot := ticker.Slot() // submit if first time or if first slot in epoch - if firstTimeSubmitted && uint64(currentSlot)%rc.network.SlotsPerEpoch() != (rc.network.SlotsPerEpoch()/2) { + if firstTimeSubmitted && uint64(slot)%rc.network.SlotsPerEpoch() != (rc.network.SlotsPerEpoch()/2) { continue } firstTimeSubmitted = true - err := rc.prepareAndSubmit(logger, currentSlot) + err := rc.prepareAndSubmit(logger, slot) if err != nil { logger.Warn("could not submit proposal preparations", zap.Error(err)) } diff --git a/operator/fee_recipient/controller_test.go b/operator/fee_recipient/controller_test.go index 02bf4144dd..6e1718afd6 100644 --- a/operator/fee_recipient/controller_test.go +++ b/operator/fee_recipient/controller_test.go @@ -13,13 +13,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/golang/mock/gomock" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/operator/slot_ticker/mocks" + "github.com/bloxapp/ssv/operator/slotticker" + "github.com/bloxapp/ssv/operator/slotticker/mocks" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -52,32 +52,47 @@ func TestSubmitProposal(t *testing.T) { t.Run("submit first time or halfway through epoch", func(t *testing.T) { numberOfRequests := 4 var wg sync.WaitGroup + wg.Add(numberOfRequests) // Set up the wait group before starting goroutines + client := beacon.NewMockBeaconNode(ctrl) client.EXPECT().SubmitProposalPreparation(gomock.Any()).DoAndReturn(func(feeRecipients map[phase0.ValidatorIndex]bellatrix.ExecutionAddress) error { wg.Done() return nil - }).MinTimes(numberOfRequests).MaxTimes(numberOfRequests) // call first time and on the halfway through epoch. 
each time should be 2 request as we have two batches + }).Times(numberOfRequests) - ticker := mocks.NewMockTicker(ctrl) - ticker.EXPECT().Subscribe(gomock.Any()).DoAndReturn(func(subscription chan phase0.Slot) event.Subscription { - subscription <- 1 // first time - time.Sleep(time.Millisecond * 500) - subscription <- 2 // should not call submit - time.Sleep(time.Millisecond * 500) - subscription <- 20 // should not call submit - time.Sleep(time.Millisecond * 500) - subscription <- phase0.Slot(network.SlotsPerEpoch()) / 2 // halfway through epoch - time.Sleep(time.Millisecond * 500) - subscription <- 63 // should not call submit - return nil - }) + ticker := mocks.NewMockSlotTicker(ctrl) + mockTimeChan := make(chan time.Time) + mockSlotChan := make(chan phase0.Slot) + ticker.EXPECT().Next().Return(mockTimeChan).AnyTimes() + ticker.EXPECT().Slot().DoAndReturn(func() phase0.Slot { + return <-mockSlotChan + }).AnyTimes() frCtrl.beaconClient = client - frCtrl.ticker = ticker + frCtrl.slotTickerProvider = func() slotticker.SlotTicker { + return ticker + } go frCtrl.Start(logger) - wg.Add(numberOfRequests) + + slots := []phase0.Slot{ + 1, // first time + 2, // should not call submit + 20, // should not call submit + phase0.Slot(network.SlotsPerEpoch()) / 2, // halfway through epoch + 63, // should not call submit + } + + for _, s := range slots { + mockTimeChan <- time.Now() + mockSlotChan <- s + time.Sleep(time.Millisecond * 500) + } + wg.Wait() + + close(mockTimeChan) // Close the channel after test + close(mockSlotChan) }) t.Run("error handling", func(t *testing.T) { @@ -88,18 +103,21 @@ func TestSubmitProposal(t *testing.T) { return errors.New("failed to submit") }).MinTimes(2).MaxTimes(2) - ticker := mocks.NewMockTicker(ctrl) - ticker.EXPECT().Subscribe(gomock.Any()).DoAndReturn(func(subscription chan phase0.Slot) event.Subscription { - subscription <- 100 // first time - return nil - }) + ticker := mocks.NewMockSlotTicker(ctrl) + mockTimeChan := make(chan time.Time, 1) + ticker.EXPECT().Next().Return(mockTimeChan).AnyTimes() + ticker.EXPECT().Slot().Return(phase0.Slot(100)).AnyTimes() frCtrl.beaconClient = client - frCtrl.ticker = ticker + frCtrl.slotTickerProvider = func() slotticker.SlotTicker { + return ticker + } go frCtrl.Start(logger) + mockTimeChan <- time.Now() wg.Add(2) wg.Wait() + close(mockTimeChan) }) } diff --git a/operator/node.go b/operator/node.go index 3dc3589349..746f2ae494 100644 --- a/operator/node.go +++ b/operator/node.go @@ -15,8 +15,9 @@ import ( "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/fee_recipient" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" @@ -40,18 +41,16 @@ type Options struct { DB basedb.Database ValidatorController validator.Controller ValidatorOptions validator.ControllerOptions `yaml:"ValidatorOptions"` - - WS api.WebSocketServer - WsAPIPort int - - Metrics nodeMetrics + DutyStore *dutystore.Store + WS api.WebSocketServer + WsAPIPort int + Metrics nodeMetrics } // operatorNode implements Node interface type operatorNode struct { network networkconfig.NetworkConfig context context.Context - ticker slot_ticker.Ticker validatorsCtrl validator.Controller consensusClient beaconprotocol.BeaconNode 
executionClient *executionclient.ExecutionClient @@ -68,7 +67,7 @@ type operatorNode struct { } // New is the constructor of operatorNode -func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { +func New(logger *zap.Logger, opts Options, slotTickerProvider slotticker.Provider) Node { storageMap := qbftstorage.NewStores() roles := []spectypes.BeaconRole{ @@ -85,7 +84,6 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { node := &operatorNode{ context: opts.Context, - ticker: slotTicker, validatorsCtrl: opts.ValidatorController, network: opts.Network, consensusClient: opts.BeaconNode, @@ -100,17 +98,18 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { ValidatorController: opts.ValidatorController, IndicesChg: opts.ValidatorController.IndicesChangeChan(), ExecuteDuty: opts.ValidatorController.ExecuteDuty, - Ticker: slotTicker, BuilderProposals: opts.ValidatorOptions.BuilderProposals, + DutyStore: opts.DutyStore, + SlotTickerProvider: slotTickerProvider, }), feeRecipientCtrl: fee_recipient.NewController(&fee_recipient.ControllerOptions{ - Ctx: opts.Context, - BeaconClient: opts.BeaconNode, - Network: opts.Network, - ShareStorage: opts.ValidatorOptions.RegistryStorage.Shares(), - RecipientStorage: opts.ValidatorOptions.RegistryStorage, - Ticker: slotTicker, - OperatorData: opts.ValidatorOptions.OperatorData, + Ctx: opts.Context, + BeaconClient: opts.BeaconNode, + Network: opts.Network, + ShareStorage: opts.ValidatorOptions.RegistryStorage.Shares(), + RecipientStorage: opts.ValidatorOptions.RegistryStorage, + OperatorData: opts.ValidatorOptions.OperatorData, + SlotTickerProvider: slotTickerProvider, }), ws: opts.WS, @@ -140,7 +139,6 @@ func (n *operatorNode) Start(logger *zap.Logger) error { } }() - go n.ticker.Start(logger) n.validatorsCtrl.StartNetworkHandlers() n.validatorsCtrl.StartValidators() go n.net.UpdateSubnets(logger) diff --git a/operator/slot_ticker/mocks/ticker.go b/operator/slot_ticker/mocks/ticker.go deleted file mode 100644 index 2ed11c9fb9..0000000000 --- a/operator/slot_ticker/mocks/ticker.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./ticker.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" - gomock "github.com/golang/mock/gomock" - event "github.com/prysmaticlabs/prysm/v4/async/event" - zap "go.uber.org/zap" -) - -// MockTicker is a mock of Ticker interface. -type MockTicker struct { - ctrl *gomock.Controller - recorder *MockTickerMockRecorder -} - -// MockTickerMockRecorder is the mock recorder for MockTicker. -type MockTickerMockRecorder struct { - mock *MockTicker -} - -// NewMockTicker creates a new mock instance. -func NewMockTicker(ctrl *gomock.Controller) *MockTicker { - mock := &MockTicker{ctrl: ctrl} - mock.recorder = &MockTickerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockTicker) EXPECT() *MockTickerMockRecorder { - return m.recorder -} - -// Start mocks base method. -func (m *MockTicker) Start(logger *zap.Logger) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start", logger) -} - -// Start indicates an expected call of Start. 
-func (mr *MockTickerMockRecorder) Start(logger interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockTicker)(nil).Start), logger) -} - -// Subscribe mocks base method. -func (m *MockTicker) Subscribe(subscription chan phase0.Slot) event.Subscription { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subscribe", subscription) - ret0, _ := ret[0].(event.Subscription) - return ret0 -} - -// Subscribe indicates an expected call of Subscribe. -func (mr *MockTickerMockRecorder) Subscribe(subscription interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockTicker)(nil).Subscribe), subscription) -} diff --git a/operator/slot_ticker/slotticker.go b/operator/slot_ticker/slotticker.go deleted file mode 100644 index dbb1fc033e..0000000000 --- a/operator/slot_ticker/slotticker.go +++ /dev/null @@ -1,88 +0,0 @@ -package slot_ticker - -import ( - "time" - - "github.com/attestantio/go-eth2-client/spec/phase0" -) - -// The TTicker interface defines a type which can expose a -// receive-only channel firing slot events. -type TTicker interface { - C() <-chan phase0.Slot - Done() -} - -// SlotTicker is a special ticker for the beacon chain block. -// The channel emits over the slot interval, and ensures that -// the ticks are in line with the genesis time. This means that -// the duration between the ticks and the genesis time are always a -// multiple of the slot duration. -// In addition, the channel returns the new slot number. -type SlotTicker struct { - c chan phase0.Slot - done chan struct{} -} - -// C returns the ticker channel. Call Cancel afterwards to ensure -// that the goroutine exits cleanly. -func (s *SlotTicker) C() <-chan phase0.Slot { - return s.c -} - -// Done should be called to clean up the ticker. -func (s *SlotTicker) Done() { - go func() { - s.done <- struct{}{} - }() -} - -// NewSlotTicker starts and returns a new SlotTicker instance. -func NewSlotTicker(genesisTime time.Time, secondsPerSlot uint64) *SlotTicker { - if genesisTime.IsZero() { - panic("zero genesis time") - } - ticker := &SlotTicker{ - c: make(chan phase0.Slot), - done: make(chan struct{}), - } - ticker.start(genesisTime, secondsPerSlot, time.Since, time.Until, time.After) - return ticker -} - -func (s *SlotTicker) start( - genesisTime time.Time, - secondsPerSlot uint64, - since, until func(time.Time) time.Duration, - after func(time.Duration) <-chan time.Time) { - - d := time.Duration(secondsPerSlot) * time.Second - - go func() { - sinceGenesis := since(genesisTime) - - var nextTickTime time.Time - var slot phase0.Slot - if sinceGenesis < d { - // Handle when the current time is before the genesis time. 
- nextTickTime = genesisTime - slot = 0 - } else { - nextTick := sinceGenesis.Truncate(d) + d - nextTickTime = genesisTime.Add(nextTick) - slot = phase0.Slot(nextTick / d) - } - - for { - waitTime := until(nextTickTime) - select { - case <-after(waitTime): - s.c <- slot - slot++ - nextTickTime = nextTickTime.Add(d) - case <-s.done: - return - } - } - }() -} diff --git a/operator/slot_ticker/ticker.go b/operator/slot_ticker/ticker.go deleted file mode 100644 index 06cbe39604..0000000000 --- a/operator/slot_ticker/ticker.go +++ /dev/null @@ -1,84 +0,0 @@ -package slot_ticker - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/prysmaticlabs/prysm/v4/async/event" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/networkconfig" -) - -//go:generate mockgen -package=mocks -destination=./mocks/ticker.go -source=./ticker.go - -type Ticker interface { - // Start ticker process - Start(logger *zap.Logger) - // Subscribe to ticker chan - Subscribe(subscription chan phase0.Slot) event.Subscription -} - -type ticker struct { - ctx context.Context - network networkconfig.NetworkConfig - - // chan - feed *event.Feed -} - -// NewTicker returns Ticker struct pointer -func NewTicker(ctx context.Context, network networkconfig.NetworkConfig) Ticker { - return &ticker{ - ctx: ctx, - network: network, - feed: &event.Feed{}, - } -} - -// Start slot ticker -func (t *ticker) Start(logger *zap.Logger) { - genesisTime := time.Unix(int64(t.network.Beacon.MinGenesisTime()), 0) - slotTicker := NewSlotTicker(genesisTime, uint64(t.network.SlotDurationSec().Seconds())) - t.listenToTicker(logger, slotTicker.C()) -} - -// Subscribe will trigger every slot -func (t *ticker) Subscribe(subscription chan phase0.Slot) event.Subscription { - return t.feed.Subscribe(subscription) -} - -// listenToTicker loop over the given slot channel -func (t *ticker) listenToTicker(logger *zap.Logger, slots <-chan phase0.Slot) { - for currentSlot := range slots { - currentEpoch := t.network.Beacon.EstimatedEpochAtSlot(currentSlot) - buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, currentSlot, currentSlot%32+1) - logger.Debug("📅 slot ticker", zap.String("epoch_slot_seq", buildStr)) - if !t.genesisEpochEffective(logger) { - continue - } - // notify current slot to channel - _ = t.feed.Send(currentSlot) - } -} - -func (t *ticker) genesisEpochEffective(logger *zap.Logger) bool { - curSlot := t.network.Beacon.EstimatedCurrentSlot() - genSlot := t.network.Beacon.GetEpochFirstSlot(t.network.GenesisEpoch) - if curSlot < genSlot { - if t.network.Beacon.IsFirstSlotOfEpoch(curSlot) { - // wait until genesis epoch starts - curEpoch := t.network.Beacon.EstimatedCurrentEpoch() - gnsTime := t.network.Beacon.GetSlotStartTime(genSlot) - logger.Info("duties paused, will resume duties on genesis epoch", - zap.Uint64("genesis_epoch", uint64(t.network.GenesisEpoch)), - zap.Uint64("current_epoch", uint64(curEpoch)), - zap.String("genesis_time", gnsTime.Format(time.UnixDate))) - } - return false - } - - return true -} diff --git a/operator/slotticker/mocks/slotticker.go b/operator/slotticker/mocks/slotticker.go new file mode 100644 index 0000000000..f8e56df5b1 --- /dev/null +++ b/operator/slotticker/mocks/slotticker.go @@ -0,0 +1,115 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./slotticker.go + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + reflect "reflect" + time "time" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + gomock "github.com/golang/mock/gomock" +) + +// MockSlotTicker is a mock of SlotTicker interface. +type MockSlotTicker struct { + ctrl *gomock.Controller + recorder *MockSlotTickerMockRecorder +} + +// MockSlotTickerMockRecorder is the mock recorder for MockSlotTicker. +type MockSlotTickerMockRecorder struct { + mock *MockSlotTicker +} + +// NewMockSlotTicker creates a new mock instance. +func NewMockSlotTicker(ctrl *gomock.Controller) *MockSlotTicker { + mock := &MockSlotTicker{ctrl: ctrl} + mock.recorder = &MockSlotTickerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { + return m.recorder +} + +// Next mocks base method. +func (m *MockSlotTicker) Next() <-chan time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(<-chan time.Time) + return ret0 +} + +// Next indicates an expected call of Next. +func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) +} + +// Slot mocks base method. +func (m *MockSlotTicker) Slot() phase0.Slot { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Slot") + ret0, _ := ret[0].(phase0.Slot) + return ret0 +} + +// Slot indicates an expected call of Slot. +func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) +} + +// MockConfigProvider is a mock of ConfigProvider interface. +type MockConfigProvider struct { + ctrl *gomock.Controller + recorder *MockConfigProviderMockRecorder +} + +// MockConfigProviderMockRecorder is the mock recorder for MockConfigProvider. +type MockConfigProviderMockRecorder struct { + mock *MockConfigProvider +} + +// NewMockConfigProvider creates a new mock instance. +func NewMockConfigProvider(ctrl *gomock.Controller) *MockConfigProvider { + mock := &MockConfigProvider{ctrl: ctrl} + mock.recorder = &MockConfigProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockConfigProvider) EXPECT() *MockConfigProviderMockRecorder { + return m.recorder +} + +// GetGenesisTime mocks base method. +func (m *MockConfigProvider) GetGenesisTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGenesisTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetGenesisTime indicates an expected call of GetGenesisTime. +func (mr *MockConfigProviderMockRecorder) GetGenesisTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGenesisTime", reflect.TypeOf((*MockConfigProvider)(nil).GetGenesisTime)) +} + +// SlotDurationSec mocks base method. +func (m *MockConfigProvider) SlotDurationSec() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SlotDurationSec") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// SlotDurationSec indicates an expected call of SlotDurationSec. 
+func (mr *MockConfigProviderMockRecorder) SlotDurationSec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SlotDurationSec", reflect.TypeOf((*MockConfigProvider)(nil).SlotDurationSec)) +} diff --git a/operator/slotticker/slotticker.go b/operator/slotticker/slotticker.go new file mode 100644 index 0000000000..74e6511092 --- /dev/null +++ b/operator/slotticker/slotticker.go @@ -0,0 +1,96 @@ +package slotticker + +import ( + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +//go:generate mockgen -package=mocks -destination=./mocks/slotticker.go -source=./slotticker.go + +type Provider func() SlotTicker + +type SlotTicker interface { + Next() <-chan time.Time + Slot() phase0.Slot +} + +type ConfigProvider interface { + SlotDurationSec() time.Duration + GetGenesisTime() time.Time +} + +type Config struct { + slotDuration time.Duration + genesisTime time.Time +} + +func (cfg Config) SlotDurationSec() time.Duration { + return cfg.slotDuration +} + +func (cfg Config) GetGenesisTime() time.Time { + return cfg.genesisTime +} + +type slotTicker struct { + timer *time.Timer + slotDuration time.Duration + genesisTime time.Time + slot phase0.Slot +} + +// New returns a goroutine-free SlotTicker implementation which is not thread-safe. +func New(cfgProvider ConfigProvider) *slotTicker { + genesisTime := cfgProvider.GetGenesisTime() + slotDuration := cfgProvider.SlotDurationSec() + + now := time.Now() + timeSinceGenesis := now.Sub(genesisTime) + + var initialDelay time.Duration + if timeSinceGenesis < 0 { + // Genesis time is in the future + initialDelay = -timeSinceGenesis // Wait until the genesis time + } else { + slotsSinceGenesis := timeSinceGenesis / slotDuration + nextSlotStartTime := genesisTime.Add((slotsSinceGenesis + 1) * slotDuration) + initialDelay = time.Until(nextSlotStartTime) + } + + return &slotTicker{ + timer: time.NewTimer(initialDelay), + slotDuration: slotDuration, + genesisTime: genesisTime, + slot: 0, + } +} + +// Next returns a channel that signals when the next slot should start. +// Note: This function is not thread-safe and should be called in a serialized fashion. +// Make sure no concurrent calls happen, as it can result in unexpected behavior. +func (s *slotTicker) Next() <-chan time.Time { + timeSinceGenesis := time.Since(s.genesisTime) + if timeSinceGenesis < 0 { + return s.timer.C + } + if !s.timer.Stop() { + // try to drain the channel, but don't block if there's no value + select { + case <-s.timer.C: + default: + } + } + slotNumber := uint64(timeSinceGenesis / s.slotDuration) + nextSlotStartTime := s.genesisTime.Add(time.Duration(slotNumber+1) * s.slotDuration) + s.timer.Reset(time.Until(nextSlotStartTime)) + s.slot = phase0.Slot(slotNumber + 1) + return s.timer.C +} + +// Slot returns the current slot number. +// Note: Like the Next function, this method is also not thread-safe. +// It should be called in a serialized manner after calling Next. 
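+//
+// A minimal consumption sketch (illustrative only; cfg is any ConfigProvider
+// implementation and handleSlot is a hypothetical placeholder, not part of
+// this package):
+//
+//	ticker := New(cfg)
+//	for {
+//		<-ticker.Next()
+//		handleSlot(ticker.Slot())
+//	}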
+func (s *slotTicker) Slot() phase0.Slot { + return s.slot +} diff --git a/operator/slotticker/slotticker_test.go b/operator/slotticker/slotticker_test.go new file mode 100644 index 0000000000..612e61d492 --- /dev/null +++ b/operator/slotticker/slotticker_test.go @@ -0,0 +1,179 @@ +package slotticker + +import ( + "sync" + "testing" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/cornelk/hashmap/assert" + "github.com/stretchr/testify/require" +) + +func TestSlotTicker(t *testing.T) { + const numTicks = 3 + slotDuration := 200 * time.Millisecond + // Set the genesis time such that we start from slot 1 + genesisTime := time.Now().Truncate(slotDuration).Add(-slotDuration) + + // Calculate the expected starting slot based on genesisTime + timeSinceGenesis := time.Since(genesisTime) + expectedSlot := phase0.Slot(timeSinceGenesis/slotDuration) + 1 + + ticker := New(Config{slotDuration, genesisTime}) + + for i := 0; i < numTicks; i++ { + <-ticker.Next() + slot := ticker.Slot() + + require.Equal(t, expectedSlot, slot) + expectedSlot++ + } +} + +func TestTickerInitialization(t *testing.T) { + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + ticker := New(Config{slotDuration, genesisTime}) + + start := time.Now() + <-ticker.Next() + slot := ticker.Slot() + + // Allow a small buffer (e.g., 10ms) due to code execution overhead + buffer := 10 * time.Millisecond + + elapsed := time.Since(start) + assert.True(t, elapsed+buffer >= slotDuration, "First tick occurred too soon: %v", elapsed.String()) + require.Equal(t, phase0.Slot(1), slot) +} + +func TestSlotNumberConsistency(t *testing.T) { + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + + ticker := New(Config{slotDuration, genesisTime}) + var lastSlot phase0.Slot + + for i := 0; i < 10; i++ { + <-ticker.Next() + slot := ticker.Slot() + + require.Equal(t, lastSlot+1, slot) + lastSlot = slot + } +} + +func TestGenesisInFuture(t *testing.T) { + slotDuration := 200 * time.Millisecond + genesisTime := time.Now().Add(1 * time.Second) // Setting genesis time 1s in the future + + ticker := New(Config{slotDuration, genesisTime}) + start := time.Now() + + <-ticker.Next() + + // The first tick should occur after the genesis time + expectedFirstTickDuration := genesisTime.Sub(start) + actualFirstTickDuration := time.Since(start) + + // Allow a small buffer (e.g., 10ms) due to code execution overhead + buffer := 10 * time.Millisecond + + assert.True(t, actualFirstTickDuration+buffer >= expectedFirstTickDuration, "First tick occurred too soon. Expected at least: %v, but got: %v", expectedFirstTickDuration.String(), actualFirstTickDuration.String()) +} + +func TestBoundedDrift(t *testing.T) { + slotDuration := 20 * time.Millisecond + genesisTime := time.Now() + + ticker := New(Config{slotDuration, genesisTime}) + ticks := 100 + + start := time.Now() + for i := 0; i < ticks; i++ { + <-ticker.Next() + } + expectedDuration := time.Duration(ticks) * slotDuration + elapsed := time.Since(start) + + // We'll allow a small buffer for drift, say 1% + buffer := expectedDuration * 1 / 100 + assert.True(t, elapsed >= expectedDuration-buffer && elapsed <= expectedDuration+buffer, "Drifted too far from expected time. 
Expected: %v, Actual: %v", expectedDuration.String(), elapsed.String()) +} + +func TestMultipleSlotTickers(t *testing.T) { + const ( + numTickers = 1000 + ticksPerTimer = 3 + ) + + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + + // Start the clock to time the full execution of all tickers + start := time.Now() + + var wg sync.WaitGroup + wg.Add(numTickers) + + for i := 0; i < numTickers; i++ { + go func() { + defer wg.Done() + ticker := New(Config{slotDuration, genesisTime}) + for j := 0; j < ticksPerTimer; j++ { + <-ticker.Next() + } + }() + } + + wg.Wait() + + // Calculate the total time taken for all tickers to complete their ticks + elapsed := time.Since(start) + expectedDuration := slotDuration * ticksPerTimer + + // We'll allow a small buffer for drift, say 1% + buffer := expectedDuration * 1 / 100 + assert.True(t, elapsed <= expectedDuration+buffer, "Expected all tickers to complete within %v but took %v", expectedDuration.String(), elapsed.String()) +} + +func TestSlotSkipping(t *testing.T) { + const ( + numTicks = 100 + skipInterval = 10 // Introduce a delay every 10 ticks + slotDuration = 20 * time.Millisecond + ) + + genesisTime := time.Now() + ticker := New(Config{slotDuration, genesisTime}) + + var lastSlot phase0.Slot + for i := 1; i <= numTicks; i++ { // Starting loop from 1 for ease of skipInterval check + select { + case <-ticker.Next(): + slot := ticker.Slot() + + // Ensure we never receive slots out of order or repeatedly + require.Equal(t, slot, lastSlot+1, "Expected slot %d to be one more than the last slot %d", slot, lastSlot) + lastSlot = slot + + // If it's the 10th tick or any multiple thereof + if i%skipInterval == 0 { + // Introduce delay to skip a slot + time.Sleep(slotDuration) + + // Ensure the next slot we receive is exactly 2 slots ahead of the previous slot + <-ticker.Next() + slotAfterDelay := ticker.Slot() + require.Equal(t, lastSlot+2, slotAfterDelay, "Expected to skip a slot after introducing a delay") + + // Update the slot variable to use this new slot for further iterations + lastSlot = slotAfterDelay + } + + case <-time.After(2 * slotDuration): // Fail if we don't get a tick within a reasonable time + t.Fatalf("Did not receive expected tick for iteration %d", i) + } + } +} diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 17dcfddc82..604e1fbb55 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -5,6 +5,7 @@ import ( "crypto/rsa" "encoding/hex" "encoding/json" + "fmt" "sync" "time" @@ -22,8 +23,10 @@ import ( "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" nodestorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validatorsmap" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" @@ -34,7 +37,6 @@ import ( "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/sync/handlers" "github.com/bloxapp/ssv/protocol/v2/types" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -76,7 +78,10 @@ type ControllerOptions struct { NewDecidedHandler qbftcontroller.NewDecidedHandler DutyRoles 
[]spectypes.BeaconRole StorageMap *storage.QBFTStores - Metrics validatorMetrics + Metrics validator.Metrics + MessageValidator validation.MessageValidator + ValidatorsMap *validatorsmap.ValidatorsMap + VerifySignatures bool // worker flags WorkersCount int `yaml:"MsgWorkersCount" env:"MSG_WORKERS_COUNT" env-default:"256" env-description:"Number of goroutines to use for message workers"` @@ -88,7 +93,8 @@ type ControllerOptions struct { // it takes care of bootstrapping, updating and managing existing validators and their shares type Controller interface { StartValidators() - ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex GetValidator(pubKey string) (*validator.Validator, bool) ExecuteDuty(logger *zap.Logger, duty *spectypes.Duty) UpdateValidatorMetaDataLoop() @@ -104,7 +110,7 @@ type Controller interface { IndicesChangeChan() chan struct{} StartValidator(share *ssvtypes.SSVShare) error - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient common.Address) error @@ -120,7 +126,7 @@ type controller struct { context context.Context logger *zap.Logger - metrics validatorMetrics + metrics validator.Metrics sharesStorage registrystorage.Shares operatorsStorage registrystorage.Operators @@ -134,8 +140,8 @@ type controller struct { operatorData *registrystorage.OperatorData operatorDataMutex sync.RWMutex - validatorsMap *validatorsMap - validatorOptions *validator.Options + validatorsMap *validatorsmap.ValidatorsMap + validatorOptions validator.Options metadataUpdateInterval time.Duration @@ -144,6 +150,7 @@ type controller struct { messageRouter *messageRouter messageWorker *worker.Worker historySyncBatchSize int + messageValidator validation.MessageValidator // nonCommittees is a cache of initialized nonCommitteeValidator instances nonCommitteeValidators *ttlcache.Cache[spectypes.MessageID, *nonCommitteeValidator] @@ -156,7 +163,7 @@ type controller struct { // NewController creates a new validator controller instance func NewController(logger *zap.Logger, options ControllerOptions) Controller { - logger.Debug("setting validator controller") + logger.Debug("setting up validator controller", zap.Bool("message_validation_verify_signatures", options.VerifySignatures)) // lookup in a map that holds all relevant operators operatorsIDs := &sync.Map{} @@ -167,10 +174,10 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { Buffer: options.QueueBufferSize, } - validatorOptions := &validator.Options{ //TODO add vars + validatorOptions := validator.Options{ //TODO add vars Network: options.Network, Beacon: options.Beacon, - BeaconNetwork: options.BeaconNetwork.BeaconNetwork, + BeaconNetwork: options.BeaconNetwork.GetNetwork(), Storage: options.StorageMap, //Share: nil, // set per validator Signer: options.KeyManager, @@ -181,6 +188,9 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { Exporter: options.Exporter, BuilderProposals: options.BuilderProposals, GasLimit: options.GasLimit, + MessageValidator: options.MessageValidator, + Metrics: options.Metrics, + VerifySignatures: options.VerifySignatures, } // If full node, 
increase queue size to make enough room @@ -192,13 +202,14 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { } } - if options.Metrics == nil { - options.Metrics = nopMetrics{} + metrics := validator.Metrics(validator.NopMetrics{}) + if options.Metrics != nil { + metrics = options.Metrics } ctrl := controller{ logger: logger.Named(logging.NameController), - metrics: options.Metrics, + metrics: metrics, sharesStorage: options.RegistryStorage.Shares(), operatorsStorage: options.RegistryStorage, recipientsStorage: options.RegistryStorage, @@ -210,14 +221,14 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { keyManager: options.KeyManager, network: options.Network, - validatorsMap: newValidatorsMap(options.Context, validatorOptions), + validatorsMap: options.ValidatorsMap, validatorOptions: validatorOptions, metadataUpdateInterval: options.MetadataUpdateInterval, operatorsIDs: operatorsIDs, - messageRouter: newMessageRouter(), + messageRouter: newMessageRouter(logger), messageWorker: worker.NewWorker(logger, workerCfg), historySyncBatchSize: options.HistorySyncBatchSize, @@ -226,6 +237,8 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { ), metadataLastUpdated: make(map[string]time.Time), indicesChange: make(chan struct{}), + + messageValidator: options.MessageValidator, } // Start automatic expired item deletion in nonCommitteeValidators. @@ -236,22 +249,7 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { // setupNetworkHandlers registers all the required handlers for sync protocols func (c *controller) setupNetworkHandlers() error { - syncHandlers := []*p2pprotocol.SyncHandler{ - p2pprotocol.WithHandler( - p2pprotocol.LastDecidedProtocol, - handlers.LastDecidedHandler(c.logger, c.ibftStorageMap, c.network), - ), - } - if c.validatorOptions.FullNode { - syncHandlers = append( - syncHandlers, - p2pprotocol.WithHandler( - p2pprotocol.DecidedHistoryProtocol, - // TODO: extract maxBatch to config - handlers.HistoryHandler(c.logger, c.ibftStorageMap, c.network, c.historySyncBatchSize), - ), - ) - } + syncHandlers := []*p2pprotocol.SyncHandler{} c.logger.Debug("setting up network handlers", zap.Int("count", len(syncHandlers)), zap.Bool("full_node", c.validatorOptions.FullNode), @@ -315,12 +313,12 @@ func (c *controller) handleRouterMessages() { pk := msg.GetID().GetPubKey() hexPK := hex.EncodeToString(pk) if v, ok := c.validatorsMap.GetValidator(hexPK); ok { - v.HandleMessage(c.logger, &msg) + v.HandleMessage(c.logger, msg) } else { if msg.MsgType != spectypes.SSVConsensusMsgType { continue // not supporting other types } - if !c.messageWorker.TryEnqueue(&msg) { // start to save non committee decided messages only post fork + if !c.messageWorker.TryEnqueue(msg) { // start to save non committee decided messages only post fork c.logger.Warn("Failed to enqueue post consensus message: buffer is full") } } @@ -336,7 +334,7 @@ var nonCommitteeValidatorTTLs = map[spectypes.BeaconRole]phase0.Slot{ spectypes.BNRoleSyncCommitteeContribution: 4, } -func (c *controller) handleWorkerMessages(msg *spectypes.SSVMessage) error { +func (c *controller) handleWorkerMessages(msg *queue.DecodedSSVMessage) error { // Get or create a nonCommitteeValidator for this MessageID, and lock it to prevent // other handlers from processing var ncv *nonCommitteeValidator @@ -354,7 +352,7 @@ func (c *controller) handleWorkerMessages(msg *spectypes.SSVMessage) error { return errors.Errorf("could not find 
validator [%s]", hex.EncodeToString(msg.GetID().GetPubKey())) } - opts := *c.validatorOptions + opts := c.validatorOptions opts.SSVShare = share ncv = &nonCommitteeValidator{ NonCommitteeValidator: validator.NewNonCommitteeValidator(c.logger, msg.GetID(), opts), @@ -459,25 +457,7 @@ func (c *controller) setupNonCommitteeValidators() { pubKeys := make([][]byte, 0, len(nonCommitteeShares)) for _, validatorShare := range nonCommitteeShares { pubKeys = append(pubKeys, validatorShare.ValidatorPubKey) - - opts := *c.validatorOptions - opts.SSVShare = validatorShare - allRoles := []spectypes.BeaconRole{ - spectypes.BNRoleAttester, - spectypes.BNRoleAggregator, - spectypes.BNRoleProposer, - spectypes.BNRoleSyncCommittee, - spectypes.BNRoleSyncCommitteeContribution, - } - for _, role := range allRoles { - messageID := spectypes.NewMsgID(ssvtypes.GetDefaultDomain(), validatorShare.ValidatorPubKey, role) - err := c.network.SyncHighestDecided(messageID) - if err != nil { - c.logger.Error("failed to sync highest decided", zap.Error(err)) - } - } } - if len(pubKeys) > 0 { c.logger.Debug("updating metadata for non-committee validators", zap.Int("count", len(pubKeys))) if err := beaconprotocol.UpdateValidatorsMetadata(c.logger, pubKeys, c, c.beacon, c.onMetadataUpdated); err != nil { @@ -548,7 +528,7 @@ func (c *controller) UpdateValidatorMetadata(pk string, metadata *beaconprotocol return nil } -// GetValidator returns a validator instance from validatorsMap +// GetValidator returns a validator instance from ValidatorsMap func (c *controller) GetValidator(pubKey string) (*validator.Validator, bool) { return c.validatorsMap.GetValidator(pubKey) } @@ -565,7 +545,7 @@ func (c *controller) ExecuteDuty(logger *zap.Logger, duty *spectypes.Duty) { logger.Error("could not create duty execute msg", zap.Error(err)) return } - dec, err := queue.DecodeSSVMessage(logger, ssvMsg) + dec, err := queue.DecodeSSVMessage(ssvMsg) if err != nil { logger.Error("could not decode duty execute msg", zap.Error(err)) return @@ -601,25 +581,36 @@ func CreateDutyExecuteMsg(duty *spectypes.Duty, pubKey phase0.BLSPubKey, domain }, nil } -// ActiveValidatorIndices fetches indices of validators who are either attesting or queued and +// CommitteeActiveIndices fetches indices of in-committee validators who are either attesting or queued and // whose activation epoch is not greater than the passed epoch. It logs a warning if an error occurs. -func (c *controller) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { - indices := make([]phase0.ValidatorIndex, 0, len(c.validatorsMap.validatorsMap)) - err := c.validatorsMap.ForEach(func(v *validator.Validator) error { - // Beacon node throws error when trying to fetch duties for non-existing validators. 
- if (v.Share.BeaconMetadata.IsAttesting() || v.Share.BeaconMetadata.Status == v1.ValidatorStatePendingQueued) && - v.Share.BeaconMetadata.ActivationEpoch <= epoch { +func (c *controller) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + validators := c.validatorsMap.GetAll() + indices := make([]phase0.ValidatorIndex, 0, len(validators)) + for _, v := range validators { + if isShareActive(epoch)(v.Share) { indices = append(indices, v.Share.BeaconMetadata.Index) } - return nil - }) - if err != nil { - c.logger.Warn("failed to get all validators public keys", zap.Error(err)) } + return indices +} +func (c *controller) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + shares := c.sharesStorage.List(nil, isShareActive(epoch)) + indices := make([]phase0.ValidatorIndex, len(shares)) + for i, share := range shares { + indices[i] = share.BeaconMetadata.Index + } return indices } +func isShareActive(epoch phase0.Epoch) func(share *ssvtypes.SSVShare) bool { + return func(share *ssvtypes.SSVShare) bool { + return share != nil && share.BeaconMetadata != nil && + (share.BeaconMetadata.IsAttesting() || share.BeaconMetadata.Status == v1.ValidatorStatePendingQueued) && + share.BeaconMetadata.ActivationEpoch <= epoch + } +} + // onMetadataUpdated is called when validator's metadata was updated func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.ValidatorMetadata) { if meta == nil { @@ -645,24 +636,15 @@ func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.Validator } } -// onShareRemove is called when a validator was removed -// TODO: think how we can make this function atomic (i.e. failing wouldn't stop the removal of the share) -func (c *controller) onShareRemove(pk string, removeSecret bool) error { - // remove from validatorsMap - v := c.validatorsMap.RemoveValidator(pk) +// onShareStop is called when a validator was removed or liquidated +func (c *controller) onShareStop(pubKey spectypes.ValidatorPK) { + // remove from ValidatorsMap + v := c.validatorsMap.RemoveValidator(hex.EncodeToString(pubKey)) // stop instance if v != nil { v.Stop() } - // remove the share secret from key-manager - if removeSecret { - if err := c.keyManager.RemoveShare(pk); err != nil { - return errors.Wrap(err, "could not remove share secret from key manager") - } - } - - return nil } func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { @@ -672,23 +654,56 @@ func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { } if err := c.setShareFeeRecipient(share, c.recipientsStorage.GetRecipientData); err != nil { - return false, errors.Wrap(err, "could not set share fee recipient") + return false, fmt.Errorf("could not set share fee recipient: %w", err) } // Start a committee validator. - v, err := c.validatorsMap.GetOrCreateValidator(c.logger.Named("validatorsMap"), share) - if err != nil { - return false, errors.Wrap(err, "could not get or create validator") + v, found := c.validatorsMap.GetValidator(hex.EncodeToString(share.ValidatorPubKey)) + if !found { + if !share.HasBeaconMetadata() { + return false, fmt.Errorf("beacon metadata is missing") + } + + // Share context with both the validator and the runners, + // so that when the validator is stopped, the runners are stopped as well. 
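+		// The cancel function is passed to validator.NewValidator below, so stopping
+		// the validator cancels this context and, with it, the runners derived from it.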
+ ctx, cancel := context.WithCancel(c.context) + + opts := c.validatorOptions + opts.SSVShare = share + opts.DutyRunners = SetupRunners(ctx, c.logger, opts) + + v = validator.NewValidator(ctx, cancel, opts) + c.validatorsMap.CreateValidator(hex.EncodeToString(share.ValidatorPubKey), v) + + c.printShare(share, "setup validator done") + + } else { + c.printShare(v.Share, "get validator") } + return c.startValidator(v) } +func (c *controller) printShare(s *ssvtypes.SSVShare, msg string) { + committee := make([]string, len(s.Committee)) + for i, c := range s.Committee { + committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.OperatorID, c.PubKey) + } + c.logger.Debug(msg, + fields.PubKey(s.ValidatorPubKey), + zap.Uint64("node_id", s.OperatorID), + zap.Strings("committee", committee), + fields.FeeRecipient(s.FeeRecipientAddress[:]), + ) +} + func (c *controller) setShareFeeRecipient(share *ssvtypes.SSVShare, getRecipientData GetRecipientDataFunc) error { - var feeRecipient bellatrix.ExecutionAddress data, found, err := getRecipientData(nil, share.OwnerAddress) if err != nil { return errors.Wrap(err, "could not get recipient data") } + + var feeRecipient bellatrix.ExecutionAddress if !found { c.logger.Debug("setting fee recipient to owner address", fields.Validator(share.ValidatorPubKey), fields.FeeRecipient(share.OwnerAddress.Bytes())) @@ -727,11 +742,6 @@ func (c *controller) UpdateValidatorMetaDataLoop() { // Prepare share filters. filters := []registrystorage.SharesFilter{} - // Filter for validators who belong to our operator. - if !c.validatorOptions.Exporter { - filters = append(filters, registrystorage.ByOperatorID(c.GetOperatorData().ID)) - } - // Filter for validators who are not liquidated. filters = append(filters, registrystorage.ByNotLiquidated()) @@ -807,9 +817,10 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt //logger.Debug("leader", zap.Int("operator_id", int(leader))) return leader }, - Storage: options.Storage.Get(role), - Network: options.Network, - Timer: roundtimer.New(ctx, nil), + Storage: options.Storage.Get(role), + Network: options.Network, + Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), + SignatureVerification: options.VerifySignatures, } config.ValueCheckF = valueCheckF @@ -823,29 +834,29 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt for _, role := range runnersType { switch role { case spectypes.BNRoleAttester: - valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) + valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.BNRoleAttester, valCheck) - runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) + runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) case spectypes.BNRoleProposer: - proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey, options.BuilderProposals) + 
proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.BNRoleProposer, proposedValueCheck) - runners[role] = runner.NewProposerRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) + runners[role] = runner.NewProposerRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) runners[role].(*runner.ProposerRunner).ProducesBlindedBlocks = options.BuilderProposals // apply blinded block flag case spectypes.BNRoleAggregator: - aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleAggregator, aggregatorValueCheckF) - runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, aggregatorValueCheckF, 0) + runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, aggregatorValueCheckF, 0) case spectypes.BNRoleSyncCommittee: - syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleSyncCommittee, syncCommitteeValueCheckF) - runners[role] = runner.NewSyncCommitteeRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeValueCheckF, 0) case spectypes.BNRoleSyncCommitteeContribution: - syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleSyncCommitteeContribution, syncCommitteeContributionValueCheckF) - runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) case 
spectypes.BNRoleValidatorRegistration: qbftCtrl := buildController(spectypes.BNRoleValidatorRegistration, nil) - runners[role] = runner.NewValidatorRegistrationRunner(spectypes.PraterNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) + runners[role] = runner.NewValidatorRegistrationRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) } } return runners diff --git a/operator/validator/controller_test.go b/operator/validator/controller_test.go index 6a06733db2..2135d24ff3 100644 --- a/operator/validator/controller_test.go +++ b/operator/validator/controller_test.go @@ -7,17 +7,18 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec/phase0" - - "github.com/bloxapp/ssv/logging" - specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/queue/worker" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -32,37 +33,45 @@ func TestHandleNonCommitteeMessages(t *testing.T) { var wg sync.WaitGroup - ctr.messageWorker.UseHandler(func(msg *spectypes.SSVMessage) error { + ctr.messageWorker.UseHandler(func(msg *queue.DecodedSSVMessage) error { wg.Done() return nil }) wg.Add(2) - identifier := spectypes.NewMsgID(types.GetDefaultDomain(), []byte("pk"), spectypes.BNRoleAttester) + identifier := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte("pk"), spectypes.BNRoleAttester) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: identifier, - Data: generateDecidedMessage(t, identifier), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: identifier, + Data: generateDecidedMessage(t, identifier), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: identifier, - Data: generateChangeRoundMsg(t, identifier), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: identifier, + Data: generateChangeRoundMsg(t, identifier), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ // checks that not process unnecessary message - MsgType: message.SSVSyncMsgType, - MsgID: identifier, - Data: []byte("data"), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ // checks that not process unnecessary message + MsgType: message.SSVSyncMsgType, + MsgID: identifier, + Data: []byte("data"), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ // checks that not process unnecessary message - MsgType: spectypes.SSVPartialSignatureMsgType, - MsgID: identifier, - Data: []byte("data"), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ // checks that not process unnecessary message + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: identifier, + Data: []byte("data"), + }, }) go func() { @@ -144,27 +153,25 @@ 
func TestGetIndices(t *testing.T) { logger := logging.TestLogger(t) ctr := setupController(logger, validators) - activeIndicesForCurrentEpoch := ctr.ActiveValidatorIndices(currentEpoch) + activeIndicesForCurrentEpoch := ctr.CommitteeActiveIndices(currentEpoch) require.Equal(t, 2, len(activeIndicesForCurrentEpoch)) // should return only active indices - activeIndicesForNextEpoch := ctr.ActiveValidatorIndices(currentEpoch + 1) + activeIndicesForNextEpoch := ctr.CommitteeActiveIndices(currentEpoch + 1) require.Equal(t, 3, len(activeIndicesForNextEpoch)) // should return including ValidatorStatePendingQueued } func setupController(logger *zap.Logger, validators map[string]*validator.Validator) controller { + validatorsMap := validatorsmap.New(context.TODO(), validatorsmap.WithInitialState(validators)) + return controller{ context: context.Background(), sharesStorage: nil, beacon: nil, keyManager: nil, shareEncryptionKeyProvider: nil, - validatorsMap: &validatorsMap{ - ctx: context.Background(), - lock: sync.RWMutex{}, - validatorsMap: validators, - }, - metadataUpdateInterval: 0, - messageRouter: newMessageRouter(), + validatorsMap: validatorsMap, + metadataUpdateInterval: 0, + messageRouter: newMessageRouter(logger), messageWorker: worker.NewWorker(logger, &worker.Config{ Ctx: context.Background(), WorkersCount: 1, diff --git a/operator/validator/metrics.go b/operator/validator/metrics.go index 2ab82cbfc4..d9cb36e817 100644 --- a/operator/validator/metrics.go +++ b/operator/validator/metrics.go @@ -33,31 +33,3 @@ func (c *controller) reportValidatorStatus(pk []byte, meta *beacon.ValidatorMeta c.metrics.ValidatorUnknown(pk) } } - -type validatorMetrics interface { - ValidatorInactive(publicKey []byte) - ValidatorNoIndex(publicKey []byte) - ValidatorError(publicKey []byte) - ValidatorReady(publicKey []byte) - ValidatorNotActivated(publicKey []byte) - ValidatorExiting(publicKey []byte) - ValidatorSlashed(publicKey []byte) - ValidatorNotFound(publicKey []byte) - ValidatorPending(publicKey []byte) - ValidatorRemoved(publicKey []byte) - ValidatorUnknown(publicKey []byte) -} - -type nopMetrics struct{} - -func (n nopMetrics) ValidatorInactive([]byte) {} -func (n nopMetrics) ValidatorNoIndex([]byte) {} -func (n nopMetrics) ValidatorError([]byte) {} -func (n nopMetrics) ValidatorReady([]byte) {} -func (n nopMetrics) ValidatorNotActivated([]byte) {} -func (n nopMetrics) ValidatorExiting([]byte) {} -func (n nopMetrics) ValidatorSlashed([]byte) {} -func (n nopMetrics) ValidatorNotFound([]byte) {} -func (n nopMetrics) ValidatorPending([]byte) {} -func (n nopMetrics) ValidatorRemoved([]byte) {} -func (n nopMetrics) ValidatorUnknown([]byte) {} diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index 6b743f6747..e7bad286b0 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -40,18 +40,32 @@ func (m *MockController) EXPECT() *MockControllerMockRecorder { return m.recorder } -// ActiveValidatorIndices mocks base method. -func (m *MockController) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { +// AllActiveIndices mocks base method. +func (m *MockController) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveValidatorIndices", epoch) + ret := m.ctrl.Call(m, "AllActiveIndices", epoch) ret0, _ := ret[0].([]phase0.ValidatorIndex) return ret0 } -// ActiveValidatorIndices indicates an expected call of ActiveValidatorIndices. 
-func (mr *MockControllerMockRecorder) ActiveValidatorIndices(epoch interface{}) *gomock.Call { +// AllActiveIndices indicates an expected call of AllActiveIndices. +func (mr *MockControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveValidatorIndices", reflect.TypeOf((*MockController)(nil).ActiveValidatorIndices), epoch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockController)(nil).AllActiveIndices), epoch) +} + +// CommitteeActiveIndices mocks base method. +func (m *MockController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. +func (mr *MockControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockController)(nil).CommitteeActiveIndices), epoch) } // ExecuteDuty mocks base method. @@ -219,17 +233,17 @@ func (mr *MockControllerMockRecorder) StartValidators() *gomock.Call { } // StopValidator mocks base method. -func (m *MockController) StopValidator(publicKey []byte) error { +func (m *MockController) StopValidator(pubKey types.ValidatorPK) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopValidator", publicKey) + ret := m.ctrl.Call(m, "StopValidator", pubKey) ret0, _ := ret[0].(error) return ret0 } // StopValidator indicates an expected call of StopValidator. -func (mr *MockControllerMockRecorder) StopValidator(publicKey interface{}) *gomock.Call { +func (mr *MockControllerMockRecorder) StopValidator(pubKey interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), publicKey) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), pubKey) } // UpdateFeeRecipient mocks base method. diff --git a/operator/validator/router.go b/operator/validator/router.go index 67ef8860a9..e090cff3bc 100644 --- a/operator/validator/router.go +++ b/operator/validator/router.go @@ -1,34 +1,40 @@ package validator import ( - spectypes "github.com/bloxapp/ssv-spec/types" + "context" + "go.uber.org/zap" "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) const bufSize = 1024 -func newMessageRouter() *messageRouter { +func newMessageRouter(logger *zap.Logger) *messageRouter { return &messageRouter{ - ch: make(chan spectypes.SSVMessage, bufSize), - msgID: commons.MsgID(), + logger: logger, + ch: make(chan *queue.DecodedSSVMessage, bufSize), + msgID: commons.MsgID(), } } type messageRouter struct { - ch chan spectypes.SSVMessage - msgID commons.MsgIDFunc + logger *zap.Logger + ch chan *queue.DecodedSSVMessage + msgID commons.MsgIDFunc } -func (r *messageRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { +func (r *messageRouter) Route(ctx context.Context, message *queue.DecodedSSVMessage) { select { + case <-ctx.Done(): + r.logger.Warn("context canceled, dropping message") case r.ch <- message: default: - logger.Warn("message router buffer is full. 
dropping message") + r.logger.Warn("message router buffer is full, dropping message") } } -func (r *messageRouter) GetMessageChan() <-chan spectypes.SSVMessage { +func (r *messageRouter) GetMessageChan() <-chan *queue.DecodedSSVMessage { return r.ch } diff --git a/operator/validator/router_test.go b/operator/validator/router_test.go index 787e2b988d..44b3798cac 100644 --- a/operator/validator/router_test.go +++ b/operator/validator/router_test.go @@ -10,7 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func TestRouter(t *testing.T) { @@ -19,7 +20,7 @@ func TestRouter(t *testing.T) { logger := logging.TestLogger(t) - router := newMessageRouter() + router := newMessageRouter(logger) expectedCount := 1000 count := 0 @@ -40,14 +41,17 @@ func TestRouter(t *testing.T) { }() for i := 0; i < expectedCount; i++ { - msg := spectypes.SSVMessage{ - MsgType: spectypes.MsgType(i % 3), - MsgID: spectypes.NewMsgID(types.GetDefaultDomain(), []byte{1, 1, 1, 1, 1}, spectypes.BNRoleAttester), - Data: []byte(fmt.Sprintf("data-%d", i)), + msg := &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.MsgType(i % 3), + MsgID: spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte{1, 1, 1, 1, 1}, spectypes.BNRoleAttester), + Data: []byte(fmt.Sprintf("data-%d", i)), + }, } - router.Route(logger, msg) + + router.Route(context.TODO(), msg) if i%2 == 0 { - go router.Route(logger, msg) + go router.Route(context.TODO(), msg) } } diff --git a/operator/validator/task_executor.go b/operator/validator/task_executor.go index 0ea2191716..f3b967b5b3 100644 --- a/operator/validator/task_executor.go +++ b/operator/validator/task_executor.go @@ -1,17 +1,16 @@ package validator import ( - "encoding/hex" - "fmt" "time" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/ethereum/go-ethereum/common" "go.uber.org/multierr" "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/protocol/v2/types" ) func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logger { @@ -20,7 +19,7 @@ func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logge With(fields...) 
} -func (c *controller) StartValidator(share *ssvtypes.SSVShare) error { +func (c *controller) StartValidator(share *types.SSVShare) error { // logger := c.taskLogger("StartValidator", fields.PubKey(share.ValidatorPubKey)) // Since we don't yet have the Beacon metadata for this validator, @@ -30,41 +29,30 @@ func (c *controller) StartValidator(share *ssvtypes.SSVShare) error { return nil } -func (c *controller) StopValidator(publicKey []byte) error { - logger := c.taskLogger("StopValidator", fields.PubKey(publicKey)) +func (c *controller) StopValidator(pubKey spectypes.ValidatorPK) error { + logger := c.taskLogger("StopValidator", fields.PubKey(pubKey)) - c.metrics.ValidatorRemoved(publicKey) - if err := c.onShareRemove(hex.EncodeToString(publicKey), true); err != nil { - return err - } + c.metrics.ValidatorRemoved(pubKey) + c.onShareStop(pubKey) logger.Info("removed validator") return nil } -func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error { - logger := c.taskLogger("LiquidateCluster", - zap.String("owner", owner.String()), - zap.Uint64s("operator_ids", operatorIDs)) +func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error { + logger := c.taskLogger("LiquidateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs)) for _, share := range toLiquidate { - // we can't remove the share secret from key-manager - // due to the fact that after activating the validators (ClusterReactivated) - // we don't have the encrypted keys to decrypt the secret, but only the owner address - if err := c.onShareRemove(hex.EncodeToString(share.ValidatorPubKey), false); err != nil { - return err - } - logger.With(fields.PubKey(share.ValidatorPubKey)).Debug("removed share") + c.onShareStop(share.ValidatorPubKey) + logger.With(fields.PubKey(share.ValidatorPubKey)).Debug("liquidated share") } return nil } -func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error { - logger := c.taskLogger("ReactivateCluster", - zap.String("owner", owner.String()), - zap.Uint64s("operator_ids", operatorIDs)) +func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error { + logger := c.taskLogger("ReactivateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs)) var startedValidators int var errs error @@ -100,17 +88,14 @@ func (c *controller) UpdateFeeRecipient(owner, recipient common.Address) error { zap.String("owner", owner.String()), zap.String("fee_recipient", recipient.String())) - err := c.validatorsMap.ForEach(func(v *validator.Validator) error { + c.validatorsMap.ForEach(func(v *validator.Validator) bool { if v.Share.OwnerAddress == owner { v.Share.FeeRecipientAddress = recipient logger.Debug("updated recipient address") } - return nil + return true }) - if err != nil { - return fmt.Errorf("update validators map: %w", err) - } return nil } diff --git a/operator/validator/validators_map.go b/operator/validator/validators_map.go deleted file mode 100644 index 02d351f39c..0000000000 --- a/operator/validator/validators_map.go +++ /dev/null @@ -1,126 +0,0 @@ -package validator - -// TODO(nkryuchkov): remove old validator interface(s) -import ( - "context" - "encoding/hex" - "fmt" - "sync" - - "github.com/bloxapp/ssv/logging/fields" - - "go.uber.org/zap" - - "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - 
"github.com/bloxapp/ssv/protocol/v2/types" -) - -// validatorIterator is the function used to iterate over existing validators -type validatorIterator func(validator *validator.Validator) error - -// validatorsMap manages a collection of running validators -type validatorsMap struct { - ctx context.Context - - optsTemplate *validator.Options - - lock sync.RWMutex - validatorsMap map[string]*validator.Validator -} - -func newValidatorsMap(ctx context.Context, optsTemplate *validator.Options) *validatorsMap { - vm := validatorsMap{ - ctx: ctx, - lock: sync.RWMutex{}, - validatorsMap: make(map[string]*validator.Validator), - optsTemplate: optsTemplate, - } - - return &vm -} - -// ForEach loops over validators -func (vm *validatorsMap) ForEach(iterator validatorIterator) error { - vm.lock.RLock() - defer vm.lock.RUnlock() - - for _, val := range vm.validatorsMap { - if err := iterator(val); err != nil { - return err - } - } - return nil -} - -// GetValidator returns a validator -func (vm *validatorsMap) GetValidator(pubKey string) (*validator.Validator, bool) { - // main lock - vm.lock.RLock() - defer vm.lock.RUnlock() - - v, ok := vm.validatorsMap[pubKey] - - return v, ok -} - -// GetOrCreateValidator creates a new validator instance if not exist -func (vm *validatorsMap) GetOrCreateValidator(logger *zap.Logger, share *types.SSVShare) (*validator.Validator, error) { - // main lock - vm.lock.Lock() - defer vm.lock.Unlock() - - pubKey := hex.EncodeToString(share.ValidatorPubKey) - if v, ok := vm.validatorsMap[pubKey]; !ok { - if !share.HasBeaconMetadata() { - return nil, fmt.Errorf("beacon metadata is missing") - } - opts := *vm.optsTemplate - opts.SSVShare = share - - // Share context with both the validator and the runners, - // so that when the validator is stopped, the runners are stopped as well. 
-		ctx, cancel := context.WithCancel(vm.ctx)
-		opts.DutyRunners = SetupRunners(ctx, logger, opts)
-		vm.validatorsMap[pubKey] = validator.NewValidator(ctx, cancel, opts)
-
-		printShare(share, logger, "setup validator done")
-		opts.SSVShare = nil
-	} else {
-		printShare(v.Share, logger, "get validator")
-	}
-
-	return vm.validatorsMap[pubKey], nil
-}
-
-// RemoveValidator removes a validator instance from the map
-func (vm *validatorsMap) RemoveValidator(pubKey string) *validator.Validator {
-	if v, found := vm.GetValidator(pubKey); found {
-		vm.lock.Lock()
-		defer vm.lock.Unlock()
-
-		delete(vm.validatorsMap, pubKey)
-		return v
-	}
-	return nil
-}
-
-// Size returns the number of validators in the map
-func (vm *validatorsMap) Size() int {
-	vm.lock.RLock()
-	defer vm.lock.RUnlock()
-
-	return len(vm.validatorsMap)
-}
-
-func printShare(s *types.SSVShare, logger *zap.Logger, msg string) {
-	committee := make([]string, len(s.Committee))
-	for i, c := range s.Committee {
-		committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.OperatorID, c.PubKey)
-	}
-	logger.Debug(msg,
-		fields.PubKey(s.ValidatorPubKey),
-		zap.Uint64("node_id", s.OperatorID),
-		zap.Strings("committee", committee),
-		fields.FeeRecipient(s.FeeRecipientAddress[:]),
-	)
-}
diff --git a/operator/validatorsmap/validators_map.go b/operator/validatorsmap/validators_map.go
new file mode 100644
index 0000000000..badc404b1c
--- /dev/null
+++ b/operator/validatorsmap/validators_map.go
@@ -0,0 +1,110 @@
+package validatorsmap

+// TODO(nkryuchkov): remove old validator interface(s)
+import (
+	"context"
+	"sync"
+
+	"github.com/bloxapp/ssv/protocol/v2/ssv/validator"
+)
+
+// validatorIterator is the function used to iterate over existing validators
+type validatorIterator func(validator *validator.Validator) bool
+
+// ValidatorsMap manages a collection of running validators
+type ValidatorsMap struct {
+	ctx           context.Context
+	lock          sync.RWMutex
+	validatorsMap map[string]*validator.Validator
+}
+
+func New(ctx context.Context, opts ...Option) *ValidatorsMap {
+	vm := &ValidatorsMap{
+		ctx:           ctx,
+		lock:          sync.RWMutex{},
+		validatorsMap: make(map[string]*validator.Validator),
+	}
+
+	for _, opt := range opts {
+		opt(vm)
+	}
+
+	return vm
+}
+
+// Option defines a ValidatorsMap configuration option.
+type Option func(*ValidatorsMap)
+
+// WithInitialState sets the initial state
+func WithInitialState(state map[string]*validator.Validator) Option {
+	return func(vm *ValidatorsMap) {
+		vm.validatorsMap = state
+	}
+}
+
+// ForEach loops over validators
+func (vm *ValidatorsMap) ForEach(iterator validatorIterator) bool {
+	vm.lock.RLock()
+	defer vm.lock.RUnlock()
+
+	for _, val := range vm.validatorsMap {
+		if !iterator(val) {
+			return false
+		}
+	}
+	return true
+}
+
+// GetAll returns all validators.
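+// The returned slice is a fresh snapshot taken under the read lock, so callers
+// can iterate it without holding the lock, though entries may become stale.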
+func (vm *ValidatorsMap) GetAll() []*validator.Validator { + vm.lock.RLock() + defer vm.lock.RUnlock() + + var validators []*validator.Validator + for _, val := range vm.validatorsMap { + validators = append(validators, val) + } + + return validators +} + +// GetValidator returns a validator +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) GetValidator(pubKey string) (*validator.Validator, bool) { + vm.lock.RLock() + defer vm.lock.RUnlock() + + v, ok := vm.validatorsMap[pubKey] + + return v, ok +} + +// CreateValidator creates a new validator instance +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) CreateValidator(pubKey string, v *validator.Validator) { + vm.lock.Lock() + defer vm.lock.Unlock() + + vm.validatorsMap[pubKey] = v +} + +// RemoveValidator removes a validator instance from the map +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) RemoveValidator(pubKey string) *validator.Validator { + if v, found := vm.GetValidator(pubKey); found { + vm.lock.Lock() + defer vm.lock.Unlock() + + delete(vm.validatorsMap, pubKey) + return v + } + return nil +} + +// Size returns the number of validators in the map +func (vm *ValidatorsMap) Size() int { + vm.lock.RLock() + defer vm.lock.RUnlock() + + return len(vm.validatorsMap) +} diff --git a/protocol/v2/blockchain/beacon/mock_client.go b/protocol/v2/blockchain/beacon/mock_client.go index 7360109bd1..2c8fa64f4d 100644 --- a/protocol/v2/blockchain/beacon/mock_client.go +++ b/protocol/v2/blockchain/beacon/mock_client.go @@ -643,6 +643,20 @@ func (mr *MockBeaconNodeMockRecorder) SubmitValidatorRegistration(pubkey, feeRec return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitValidatorRegistration", reflect.TypeOf((*MockBeaconNode)(nil).SubmitValidatorRegistration), pubkey, feeRecipient, sig) } +// SubmitVoluntaryExit mocks base method. +func (m *MockBeaconNode) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit, sig phase0.BLSSignature) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitVoluntaryExit", voluntaryExit, sig) + ret0, _ := ret[0].(error) + return ret0 +} + +// SubmitVoluntaryExit indicates an expected call of SubmitVoluntaryExit. +func (mr *MockBeaconNodeMockRecorder) SubmitVoluntaryExit(voluntaryExit, sig interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitVoluntaryExit", reflect.TypeOf((*MockBeaconNode)(nil).SubmitVoluntaryExit), voluntaryExit, sig) +} + // SyncCommitteeDuties mocks base method. func (m *MockBeaconNode) SyncCommitteeDuties(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.SyncCommitteeDuty, error) { m.ctrl.T.Helper() diff --git a/protocol/v2/blockchain/beacon/mocks/network.go b/protocol/v2/blockchain/beacon/mocks/network.go index 0a129035f2..65c124cbf1 100644 --- a/protocol/v2/blockchain/beacon/mocks/network.go +++ b/protocol/v2/blockchain/beacon/mocks/network.go @@ -233,6 +233,20 @@ func (mr *MockBeaconNetworkMockRecorder) GetNetwork() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetwork", reflect.TypeOf((*MockBeaconNetwork)(nil).GetNetwork)) } +// GetSlotEndTime mocks base method. +func (m *MockBeaconNetwork) GetSlotEndTime(slot phase0.Slot) time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSlotEndTime", slot) + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetSlotEndTime indicates an expected call of GetSlotEndTime. 
+func (mr *MockBeaconNetworkMockRecorder) GetSlotEndTime(slot interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSlotEndTime", reflect.TypeOf((*MockBeaconNetwork)(nil).GetSlotEndTime), slot) +} + // GetSlotStartTime mocks base method. func (m *MockBeaconNetwork) GetSlotStartTime(slot phase0.Slot) time.Time { m.ctrl.T.Helper() diff --git a/protocol/v2/blockchain/beacon/network.go b/protocol/v2/blockchain/beacon/network.go index e9f0c240c4..965890540f 100644 --- a/protocol/v2/blockchain/beacon/network.go +++ b/protocol/v2/blockchain/beacon/network.go @@ -29,6 +29,7 @@ type BeaconNetwork interface { EpochStartTime(epoch phase0.Epoch) time.Time GetSlotStartTime(slot phase0.Slot) time.Time + GetSlotEndTime(slot phase0.Slot) time.Time IsFirstSlotOfEpoch(slot phase0.Slot) bool GetEpochFirstSlot(epoch phase0.Epoch) phase0.Slot @@ -82,6 +83,11 @@ func (n Network) GetSlotStartTime(slot phase0.Slot) time.Time { return start } +// GetSlotEndTime returns the end time for the given slot +func (n Network) GetSlotEndTime(slot phase0.Slot) time.Time { + return n.GetSlotStartTime(slot + 1) +} + // EstimatedCurrentSlot returns the estimation of the current slot func (n Network) EstimatedCurrentSlot() phase0.Slot { return n.EstimatedSlotAtTime(time.Now().Unix()) diff --git a/protocol/v2/blockchain/beacon/network_test.go b/protocol/v2/blockchain/beacon/network_test.go new file mode 100644 index 0000000000..a5646bf36a --- /dev/null +++ b/protocol/v2/blockchain/beacon/network_test.go @@ -0,0 +1,19 @@ +package beacon + +import ( + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/stretchr/testify/require" +) + +func TestNetwork_GetSlotEndTime(t *testing.T) { + slot := phase0.Slot(1) + + n := NewNetwork(spectypes.PraterNetwork) + slotStart := n.GetSlotStartTime(slot) + slotEnd := n.GetSlotEndTime(slot) + + require.Equal(t, n.SlotDurationSec(), slotEnd.Sub(slotStart)) +} diff --git a/protocol/v2/p2p/network.go b/protocol/v2/p2p/network.go index 8e9f99a78d..bd201dddda 100644 --- a/protocol/v2/p2p/network.go +++ b/protocol/v2/p2p/network.go @@ -132,21 +132,6 @@ func WithHandler(protocol SyncProtocol, handler RequestHandler) *SyncHandler { } } -// Syncer holds the interface for syncing data from other peers -type Syncer interface { - specqbft.Syncer - // GetHistory sync the given range from a set of peers that supports history for the given identifier - // it accepts a list of targets for the request. 
- GetHistory(logger *zap.Logger, mid spectypes.MessageID, from, to specqbft.Height, targets ...string) ([]SyncResult, specqbft.Height, error) - - // RegisterHandlers registers handler for the given protocol - RegisterHandlers(logger *zap.Logger, handlers ...*SyncHandler) - - // LastDecided fetches last decided from a random set of peers - // TODO: replace with specqbft.SyncHighestDecided - LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]SyncResult, error) -} - // MsgValidationResult helps other components to report message validation with a generic results scheme type MsgValidationResult int32 @@ -173,6 +158,8 @@ type ValidationReporting interface { type Network interface { Subscriber Broadcaster - Syncer ValidationReporting + + // RegisterHandlers registers handler for the given protocol + RegisterHandlers(logger *zap.Logger, handlers ...*SyncHandler) } diff --git a/protocol/v2/qbft/config.go b/protocol/v2/qbft/config.go index 580b3b03e2..21aae3df6b 100644 --- a/protocol/v2/qbft/config.go +++ b/protocol/v2/qbft/config.go @@ -3,6 +3,8 @@ package qbft import ( specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) @@ -24,18 +26,21 @@ type IConfig interface { // GetStorage returns a storage instance GetStorage() qbftstorage.QBFTStore // GetTimer returns round timer - GetTimer() specqbft.Timer + GetTimer() roundtimer.Timer + // VerifySignatures returns if signature is checked + VerifySignatures() bool } type Config struct { - Signer spectypes.SSVSigner - SigningPK []byte - Domain spectypes.DomainType - ValueCheckF specqbft.ProposedValueCheckF - ProposerF specqbft.ProposerF - Storage qbftstorage.QBFTStore - Network specqbft.Network - Timer specqbft.Timer + Signer spectypes.SSVSigner + SigningPK []byte + Domain spectypes.DomainType + ValueCheckF specqbft.ProposedValueCheckF + ProposerF specqbft.ProposerF + Storage qbftstorage.QBFTStore + Network specqbft.Network + Timer roundtimer.Timer + SignatureVerification bool } // GetSigner returns a Signer instance @@ -74,6 +79,10 @@ func (c *Config) GetStorage() qbftstorage.QBFTStore { } // GetTimer returns round timer -func (c *Config) GetTimer() specqbft.Timer { +func (c *Config) GetTimer() roundtimer.Timer { return c.Timer } + +func (c *Config) VerifySignatures() bool { + return c.SignatureVerification +} diff --git a/protocol/v2/qbft/controller/controller.go b/protocol/v2/qbft/controller/controller.go index 84abc6600f..dd786dc993 100644 --- a/protocol/v2/qbft/controller/controller.go +++ b/protocol/v2/qbft/controller/controller.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" "encoding/json" + "fmt" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" @@ -24,14 +25,12 @@ type Controller struct { Identifier []byte Height specqbft.Height // incremental Height for InstanceContainer // StoredInstances stores the last HistoricalInstanceCapacity in an array for message processing purposes. 
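// Note on the slimmed-down Controller below: with FutureMsgsContainer removed,
// future-height messages are no longer buffered toward an f+1 sync trigger.
// ProcessMsg now rejects them with an error, and only a decided message for a
// future height bumps the controller's Height (see decided.go later in this patch).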
- StoredInstances InstanceContainer - // FutureMsgsContainer holds all msgs from a higher height - FutureMsgsContainer map[spectypes.OperatorID]specqbft.Height // maps msg signer to height of higher height received msgs - Domain spectypes.DomainType - Share *spectypes.Share - NewDecidedHandler NewDecidedHandler `json:"-"` - config qbft.IConfig - fullNode bool + StoredInstances InstanceContainer + Domain spectypes.DomainType + Share *spectypes.Share + NewDecidedHandler NewDecidedHandler `json:"-"` + config qbft.IConfig + fullNode bool } func NewController( @@ -42,14 +41,13 @@ func NewController( fullNode bool, ) *Controller { return &Controller{ - Identifier: identifier, - Height: specqbft.FirstHeight, - Domain: domain, - Share: share, - StoredInstances: make(InstanceContainer, 0, InstanceContainerDefaultCapacity), - FutureMsgsContainer: make(map[spectypes.OperatorID]specqbft.Height), - config: config, - fullNode: fullNode, + Identifier: identifier, + Height: specqbft.FirstHeight, + Domain: domain, + Share: share, + StoredInstances: make(InstanceContainer, 0, InstanceContainerDefaultCapacity), + config: config, + fullNode: fullNode, } } @@ -100,10 +98,9 @@ func (c *Controller) ProcessMsg(logger *zap.Logger, msg *specqbft.SignedMessage) if IsDecidedMsg(c.Share, msg) { return c.UponDecided(logger, msg) } else if c.isFutureMessage(msg) { - return c.UponFutureMsg(logger, msg) - } else { - return c.UponExistingInstanceMsg(logger, msg) + return nil, fmt.Errorf("future msg from height, could not process") } + return c.UponExistingInstanceMsg(logger, msg) } func (c *Controller) UponExistingInstanceMsg(logger *zap.Logger, msg *specqbft.SignedMessage) (*specqbft.SignedMessage, error) { diff --git a/protocol/v2/qbft/controller/controller_test.go b/protocol/v2/qbft/controller/controller_test.go index 35c7a39d31..cd119c9d86 100644 --- a/protocol/v2/qbft/controller/controller_test.go +++ b/protocol/v2/qbft/controller/controller_test.go @@ -1,11 +1,18 @@ package controller import ( + "encoding/json" "testing" - "github.com/bloxapp/ssv/protocol/v2/qbft" - + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/protocol/v2/qbft" + "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + "github.com/bloxapp/ssv/protocol/v2/types" ) func TestController_Marshaling(t *testing.T) { @@ -25,3 +32,60 @@ func TestController_Marshaling(t *testing.T) { require.NoError(t, err) require.EqualValues(t, byts, bytsDecoded) } + +func TestController_OnTimeoutWithRoundCheck(t *testing.T) { + // Initialize logger + logger := logging.TestLogger(t) + + testConfig := &qbft.Config{ + Signer: spectestingutils.NewTestingKeyManager(), + Network: spectestingutils.NewTestingNetwork(), + Timer: roundtimer.NewTestingTimer(), + } + + share := spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()) + inst := instance.NewInstance( + testConfig, + share, + []byte{1, 2, 3, 4}, + specqbft.FirstHeight, + ) + + // Initialize Controller + contr := &Controller{} + + // Initialize EventMsg for the test + timeoutData := types.TimeoutData{ + Height: specqbft.FirstHeight, + Round: specqbft.FirstRound, + } + + data, err := json.Marshal(timeoutData) + require.NoError(t, err) + + msg := &types.EventMsg{ + Type: types.Timeout, + Data: data, + } + + // Simulate a scenario where the instance is at a higher round + inst.State.Round = 
specqbft.Round(2) + contr.StoredInstances.addNewInstance(inst) + + // Call OnTimeout and capture the error + err = contr.OnTimeout(logger, *msg) + + // Assert that the error is nil and the round did not bump + require.NoError(t, err) + require.Equal(t, specqbft.Round(2), inst.State.Round, "Round should not bump") + + // Simulate a scenario where the instance is at the same or lower round + inst.State.Round = specqbft.FirstRound + + // Call OnTimeout and capture the error + err = contr.OnTimeout(logger, *msg) + + // Assert that the error is nil and the round did bump + require.NoError(t, err) + require.Equal(t, specqbft.Round(2), inst.State.Round, "Round should bump") +} diff --git a/protocol/v2/qbft/controller/decided.go b/protocol/v2/qbft/controller/decided.go index 6c239a5a90..f9b694bc8e 100644 --- a/protocol/v2/qbft/controller/decided.go +++ b/protocol/v2/qbft/controller/decided.go @@ -67,8 +67,6 @@ func (c *Controller) UponDecided(logger *zap.Logger, msg *specqbft.SignedMessage } if isFutureDecided { - // sync gap - c.GetConfig().GetNetwork().SyncDecidedByRange(spectypes.MessageIDFromBytes(c.Identifier), c.Height, msg.Message.Height) // bump height c.Height = msg.Message.Height } diff --git a/protocol/v2/qbft/controller/future_msg.go b/protocol/v2/qbft/controller/future_msg.go deleted file mode 100644 index 30a205ff6e..0000000000 --- a/protocol/v2/qbft/controller/future_msg.go +++ /dev/null @@ -1,76 +0,0 @@ -package controller - -import ( - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/types" -) - -func (c *Controller) UponFutureMsg(logger *zap.Logger, msg *specqbft.SignedMessage) (*specqbft.SignedMessage, error) { - if err := ValidateFutureMsg(c.GetConfig(), msg, c.Share.Committee); err != nil { - return nil, errors.Wrap(err, "invalid future msg") - } - if !c.addHigherHeightMsg(msg) { - return nil, errors.New("discarded future msg") - } - if c.f1SyncTrigger() { - logger.Debug("🔀 triggered f+1 sync", - zap.Uint64("ctrl_height", uint64(c.Height)), - zap.Uint64("msg_height", uint64(msg.Message.Height))) - return nil, c.GetConfig().GetNetwork().SyncHighestDecided(spectypes.MessageIDFromBytes(c.Identifier)) - } - return nil, nil -} - -func ValidateFutureMsg( - config qbft.IConfig, - msg *specqbft.SignedMessage, - operators []*spectypes.Operator, -) error { - if err := msg.Validate(); err != nil { - return errors.Wrap(err, "invalid decided msg") - } - - if len(msg.GetSigners()) != 1 { - return errors.New("allows 1 signer") - } - - // verify signature - if err := types.VerifyByOperators(msg.Signature, msg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") - } - - return nil -} - -// addHigherHeightMsg verifies msg, cleanup queue and adds the message if unique signer -func (c *Controller) addHigherHeightMsg(msg *specqbft.SignedMessage) bool { - // cleanup lower height msgs - cleanedQueue := make(map[spectypes.OperatorID]specqbft.Height) - signerExists := false - for signer, height := range c.FutureMsgsContainer { - if height <= c.Height { - continue - } - - if signer == msg.GetSigners()[0] { - signerExists = true - } - cleanedQueue[signer] = height - } - - if !signerExists { - cleanedQueue[msg.GetSigners()[0]] = msg.Message.Height - } - c.FutureMsgsContainer = cleanedQueue - return !signerExists -} - -// f1SyncTrigger returns true if 
received f+1 higher height messages from unique signers -func (c *Controller) f1SyncTrigger() bool { - return c.Share.HasPartialQuorum(len(c.FutureMsgsContainer)) -} diff --git a/protocol/v2/qbft/controller/timer.go b/protocol/v2/qbft/controller/timer.go index f073fa813c..fa3ff1e4db 100644 --- a/protocol/v2/qbft/controller/timer.go +++ b/protocol/v2/qbft/controller/timer.go @@ -19,8 +19,13 @@ func (c *Controller) OnTimeout(logger *zap.Logger, msg types.EventMsg) error { if instance == nil { return errors.New("instance is nil") } - decided, _ := instance.IsDecided() - if decided { + + if timeoutData.Round < instance.State.Round { + logger.Debug("timeout for old round", zap.Uint64("timeout round", uint64(timeoutData.Round)), zap.Uint64("instance round", uint64(instance.State.Round))) + return nil + } + + if decided, _ := instance.IsDecided(); decided { return nil } return instance.UponRoundTimeout(logger) diff --git a/protocol/v2/qbft/instance/commit.go b/protocol/v2/qbft/instance/commit.go index 5620602ea6..53d4f5855e 100644 --- a/protocol/v2/qbft/instance/commit.go +++ b/protocol/v2/qbft/instance/commit.go @@ -158,9 +158,10 @@ func BaseCommitValidation( return errors.Wrap(err, "signed commit invalid") } - // verify signature - if err := types.VerifyByOperators(signedCommit.Signature, signedCommit, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedCommit.Signature, signedCommit, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/instance.go b/protocol/v2/qbft/instance/instance.go index 2513268e25..f0d99e92cd 100644 --- a/protocol/v2/qbft/instance/instance.go +++ b/protocol/v2/qbft/instance/instance.go @@ -66,7 +66,7 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh i.State.Height = height i.metrics.StartStage() - i.config.GetTimer().TimeoutForRound(specqbft.FirstRound) + i.config.GetTimer().TimeoutForRound(height, specqbft.FirstRound) logger = logger.With( fields.Round(i.State.Round), @@ -95,13 +95,9 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh } func (i *Instance) Broadcast(logger *zap.Logger, msg *specqbft.SignedMessage) error { - // logger.Debug("Broadcast", - // zap.Any("MsgType", msg.Message.MsgType), - // fields.Round(msg.Message.Round), - // zap.Any("DataRound", msg.Message.DataRound), - // fields.Height(msg.Message.Height), - // ) - + if !i.CanProcessMessages() { + return errors.New("instance stopped processing messages") + } byts, err := msg.Encode() if err != nil { return errors.Wrap(err, "could not encode message") diff --git a/protocol/v2/qbft/instance/marshalutils.go b/protocol/v2/qbft/instance/marshalutils.go new file mode 100644 index 0000000000..ba76e75453 --- /dev/null +++ b/protocol/v2/qbft/instance/marshalutils.go @@ -0,0 +1,47 @@ +package instance + +import "encoding/json" + +/////////////////////// JSON Marshalling for Tests /////////////////////// + +// region: JSON Marshalling for Instance + +// MarshalJSON is a custom JSON marshaller for Instance +func (i *Instance) MarshalJSON() ([]byte, error) { + type Alias Instance + if i.forceStop { + return json.Marshal(&struct { + ForceStop bool `json:"forceStop"` + *Alias + }{ + ForceStop: i.forceStop, + Alias: (*Alias)(i), + }) + } else { + return 
json.Marshal(&struct { + *Alias + }{ + Alias: (*Alias)(i), + }) + } +} + +// UnmarshalJSON is a custom JSON unmarshaller for Instance +func (i *Instance) UnmarshalJSON(data []byte) error { + type Alias Instance + aux := &struct { + ForceStop *bool `json:"forceStop,omitempty"` + *Alias + }{ + Alias: (*Alias)(i), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ForceStop != nil { + i.forceStop = *aux.ForceStop + } + return nil +} + +// endregion: JSON Marshalling for Instance diff --git a/protocol/v2/qbft/instance/metrics.go b/protocol/v2/qbft/instance/metrics.go index e2598671ad..e32e49a872 100644 --- a/protocol/v2/qbft/instance/metrics.go +++ b/protocol/v2/qbft/instance/metrics.go @@ -2,13 +2,13 @@ package instance import ( "encoding/hex" - "go.uber.org/zap" "time" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/zap" ) var ( @@ -50,7 +50,7 @@ func newMetrics(msgID spectypes.MessageID) *metrics { proposalDuration: metricsStageDuration.WithLabelValues("proposal", hexPubKey), prepareDuration: metricsStageDuration.WithLabelValues("prepare", hexPubKey), commitDuration: metricsStageDuration.WithLabelValues("commit", hexPubKey), - round: metricsRound.WithLabelValues("validator", hexPubKey), + round: metricsRound.WithLabelValues(msgID.GetRoleType().String(), hexPubKey), } } diff --git a/protocol/v2/qbft/instance/prepare.go b/protocol/v2/qbft/instance/prepare.go index 7714771b88..55748b33c2 100644 --- a/protocol/v2/qbft/instance/prepare.go +++ b/protocol/v2/qbft/instance/prepare.go @@ -159,8 +159,10 @@ func validSignedPrepareForHeightRoundAndRoot( return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedPrepare.Signature, signedPrepare, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedPrepare.Signature, signedPrepare, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/proposal.go b/protocol/v2/qbft/instance/proposal.go index a417c04fc4..a4b5303ada 100644 --- a/protocol/v2/qbft/instance/proposal.go +++ b/protocol/v2/qbft/instance/proposal.go @@ -10,7 +10,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/types" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" ) // uponProposal process proposal message @@ -33,7 +33,7 @@ func (i *Instance) uponProposal(logger *zap.Logger, signedProposal *specqbft.Sig // A future justified proposal should bump us into future round and reset timer if signedProposal.Message.Round > i.State.Round { - i.config.GetTimer().TimeoutForRound(signedProposal.Message.Round) + i.config.GetTimer().TimeoutForRound(signedProposal.Message.Height, signedProposal.Message.Round) } i.bumpToRound(newRound) @@ -77,8 +77,10 @@ func isValidProposal( if len(signedProposal.GetSigners()) != 1 { return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedProposal.Signature, signedProposal, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if 
err := ssvtypes.VerifyByOperators(signedProposal.Signature, signedProposal, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } if !signedProposal.MatchedSigners([]spectypes.OperatorID{proposer(state, config, signedProposal.Message.Round)}) { return errors.New("proposal leader invalid") @@ -121,6 +123,30 @@ func isValidProposal( return errors.New("proposal is not valid with current state") } +func IsProposalJustification( + config qbft.IConfig, + share *ssvtypes.SSVShare, + roundChangeMsgs []*specqbft.SignedMessage, + prepareMsgs []*specqbft.SignedMessage, + height specqbft.Height, + round specqbft.Round, + fullData []byte, +) error { + return isProposalJustification( + &specqbft.State{ + Share: &share.Share, + Height: height, + }, + config, + roundChangeMsgs, + prepareMsgs, + height, + round, + fullData, + func(data []byte) error { return nil }, + ) +} + // isProposalJustification returns nil if the proposal and round change messages are valid and justify a proposal message for the provided round, value and leader func isProposalJustification( state *specqbft.State, @@ -256,7 +282,7 @@ func CreateProposal(state *specqbft.State, config qbft.IConfig, fullData []byte, } sig, err := config.GetSigner().SignRoot(msg, spectypes.QBFTSignatureType, state.Share.SharePubKey) if err != nil { - return nil, errors.Wrap(err, "failed signing prepare msg") + return nil, errors.Wrap(err, "failed signing proposal msg") } signedMsg := &specqbft.SignedMessage{ diff --git a/protocol/v2/qbft/instance/round_change.go b/protocol/v2/qbft/instance/round_change.go index 5b0de2e3c9..00cd676b3d 100644 --- a/protocol/v2/qbft/instance/round_change.go +++ b/protocol/v2/qbft/instance/round_change.go @@ -30,8 +30,11 @@ func (i *Instance) uponRoundChange( return nil // UponCommit was already called } - logger = logger.With(fields.Round(i.State.Round), - fields.Height(i.State.Height)) + logger = logger.With( + fields.Round(i.State.Round), + fields.Height(i.State.Height), + zap.Uint64("msg_round", uint64(signedRoundChange.Message.Round)), + ) logger.Debug("🔄 got round change", fields.Root(signedRoundChange.Message.Root), @@ -85,7 +88,9 @@ func (i *Instance) uponRoundChange( func (i *Instance) uponChangeRoundPartialQuorum(logger *zap.Logger, newRound specqbft.Round, instanceStartValue []byte) error { i.bumpToRound(newRound) i.State.ProposalAcceptedForCurrentRound = nil - i.config.GetTimer().TimeoutForRound(i.State.Round) + + i.config.GetTimer().TimeoutForRound(i.State.Height, i.State.Round) + roundChange, err := CreateRoundChange(i.State, i.config, newRound, instanceStartValue) if err != nil { return errors.Wrap(err, "failed to create round change message") @@ -247,8 +252,10 @@ func validRoundChangeForData( return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedMsg.Signature, signedMsg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, state.Share.Committee); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedMsg.Signature, signedMsg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, state.Share.Committee); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } if err := signedMsg.Message.Validate(); err != nil { @@ -377,7 +384,7 @@ func CreateRoundChange(state *specqbft.State, config qbft.IConfig, newRound spec } sig, err := config.GetSigner().SignRoot(msg, 
spectypes.QBFTSignatureType, state.Share.SharePubKey) if err != nil { - return nil, errors.Wrap(err, "failed signing prepare msg") + return nil, errors.Wrap(err, "failed signing round change msg") } signedMsg := &specqbft.SignedMessage{ diff --git a/protocol/v2/qbft/instance/timeout.go b/protocol/v2/qbft/instance/timeout.go index ee8e9248b7..62ae4c784c 100644 --- a/protocol/v2/qbft/instance/timeout.go +++ b/protocol/v2/qbft/instance/timeout.go @@ -1,9 +1,10 @@ package instance import ( - "github.com/bloxapp/ssv/logging/fields" "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/logging/fields" ) var CutoffRound = 15 // stop processing instances after 8*2+120*6 = 14.2 min (~ 2 epochs) @@ -22,7 +23,7 @@ func (i *Instance) UponRoundTimeout(logger *zap.Logger) error { defer func() { i.bumpToRound(newRound) i.State.ProposalAcceptedForCurrentRound = nil - i.config.GetTimer().TimeoutForRound(i.State.Round) + i.config.GetTimer().TimeoutForRound(i.State.Height, i.State.Round) }() roundChange, err := CreateRoundChange(i.State, i.config, newRound, i.StartValue) diff --git a/protocol/v2/qbft/roundtimer/mocks/timer.go b/protocol/v2/qbft/roundtimer/mocks/timer.go new file mode 100644 index 0000000000..2a691f9ab6 --- /dev/null +++ b/protocol/v2/qbft/roundtimer/mocks/timer.go @@ -0,0 +1,100 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./timer.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + time "time" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + qbft "github.com/bloxapp/ssv-spec/qbft" + gomock "github.com/golang/mock/gomock" +) + +// MockTimer is a mock of Timer interface. +type MockTimer struct { + ctrl *gomock.Controller + recorder *MockTimerMockRecorder +} + +// MockTimerMockRecorder is the mock recorder for MockTimer. +type MockTimerMockRecorder struct { + mock *MockTimer +} + +// NewMockTimer creates a new mock instance. +func NewMockTimer(ctrl *gomock.Controller) *MockTimer { + mock := &MockTimer{ctrl: ctrl} + mock.recorder = &MockTimerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTimer) EXPECT() *MockTimerMockRecorder { + return m.recorder +} + +// TimeoutForRound mocks base method. +func (m *MockTimer) TimeoutForRound(height qbft.Height, round qbft.Round) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "TimeoutForRound", height, round) +} + +// TimeoutForRound indicates an expected call of TimeoutForRound. +func (mr *MockTimerMockRecorder) TimeoutForRound(height, round interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeoutForRound", reflect.TypeOf((*MockTimer)(nil).TimeoutForRound), height, round) +} + +// MockBeaconNetwork is a mock of BeaconNetwork interface. +type MockBeaconNetwork struct { + ctrl *gomock.Controller + recorder *MockBeaconNetworkMockRecorder +} + +// MockBeaconNetworkMockRecorder is the mock recorder for MockBeaconNetwork. +type MockBeaconNetworkMockRecorder struct { + mock *MockBeaconNetwork +} + +// NewMockBeaconNetwork creates a new mock instance. +func NewMockBeaconNetwork(ctrl *gomock.Controller) *MockBeaconNetwork { + mock := &MockBeaconNetwork{ctrl: ctrl} + mock.recorder = &MockBeaconNetworkMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
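
A sketch of using the generated MockTimer above with the new two-argument signature (assuming a gomock controller named ctrl):

    timerMock := mocks.NewMockTimer(ctrl)
    timerMock.EXPECT().TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound)
    var timer roundtimer.Timer = timerMock // satisfies the new Timer interface
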
+func (m *MockBeaconNetwork) EXPECT() *MockBeaconNetworkMockRecorder { + return m.recorder +} + +// GetSlotStartTime mocks base method. +func (m *MockBeaconNetwork) GetSlotStartTime(slot phase0.Slot) time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSlotStartTime", slot) + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetSlotStartTime indicates an expected call of GetSlotStartTime. +func (mr *MockBeaconNetworkMockRecorder) GetSlotStartTime(slot interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSlotStartTime", reflect.TypeOf((*MockBeaconNetwork)(nil).GetSlotStartTime), slot) +} + +// SlotDurationSec mocks base method. +func (m *MockBeaconNetwork) SlotDurationSec() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SlotDurationSec") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// SlotDurationSec indicates an expected call of SlotDurationSec. +func (mr *MockBeaconNetworkMockRecorder) SlotDurationSec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SlotDurationSec", reflect.TypeOf((*MockBeaconNetwork)(nil).SlotDurationSec)) +} diff --git a/protocol/v2/qbft/roundtimer/testing_timer.go b/protocol/v2/qbft/roundtimer/testing_timer.go new file mode 100644 index 0000000000..310a072aa3 --- /dev/null +++ b/protocol/v2/qbft/roundtimer/testing_timer.go @@ -0,0 +1,23 @@ +package roundtimer + +import specqbft "github.com/bloxapp/ssv-spec/qbft" + +type TimerState struct { + Timeouts int + Round specqbft.Round +} + +type TestQBFTTimer struct { + State TimerState +} + +func NewTestingTimer() Timer { + return &TestQBFTTimer{ + State: TimerState{}, + } +} + +func (t *TestQBFTTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { + t.State.Timeouts++ + t.State.Round = round +} diff --git a/protocol/v2/qbft/roundtimer/timer.go b/protocol/v2/qbft/roundtimer/timer.go index df0463e695..fde166f3dc 100644 --- a/protocol/v2/qbft/roundtimer/timer.go +++ b/protocol/v2/qbft/roundtimer/timer.go @@ -6,25 +6,36 @@ import ( "sync/atomic" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" ) -type RoundTimeoutFunc func(specqbft.Round) time.Duration +//go:generate mockgen -package=mocks -destination=./mocks/timer.go -source=./timer.go -var ( - quickTimeoutThreshold = specqbft.Round(8) - quickTimeout = 2 * time.Second - slowTimeout = 2 * time.Minute +type OnRoundTimeoutF func(round specqbft.Round) + +const ( + QuickTimeoutThreshold = specqbft.Round(8) + QuickTimeout = 2 * time.Second + SlowTimeout = 2 * time.Minute ) -// RoundTimeout returns the number of seconds until next timeout for a give round. 
-// if the round is smaller than 8 -> 2s; otherwise -> 2m -// see SIP https://github.com/bloxapp/SIPs/pull/22 -func RoundTimeout(r specqbft.Round) time.Duration { - if r <= quickTimeoutThreshold { - return quickTimeout - } - return slowTimeout +// Timer is an interface for a round timer, calling the UponRoundTimeout when times out +type Timer interface { + // TimeoutForRound will reset running timer if exists and will start a new timer for a specific round + TimeoutForRound(height specqbft.Height, round specqbft.Round) +} + +type BeaconNetwork interface { + GetSlotStartTime(slot phase0.Slot) time.Time + SlotDurationSec() time.Duration +} + +type TimeoutOptions struct { + quickThreshold specqbft.Round + quick time.Duration + slow time.Duration } // RoundTimer helps to manage current instance rounds. @@ -36,28 +47,98 @@ type RoundTimer struct { // timer is the underlying time.Timer timer *time.Timer // result holds the result of the timer - done func() + done OnRoundTimeoutF // round is the current round of the timer round int64 - - roundTimeout RoundTimeoutFunc + // timeoutOptions holds the timeoutOptions for the timer + timeoutOptions TimeoutOptions + // role is the role of the instance + role spectypes.BeaconRole + // beaconNetwork is the beacon network + beaconNetwork BeaconNetwork } // New creates a new instance of RoundTimer. -func New(pctx context.Context, done func()) *RoundTimer { +func New(pctx context.Context, beaconNetwork BeaconNetwork, role spectypes.BeaconRole, done OnRoundTimeoutF) *RoundTimer { ctx, cancelCtx := context.WithCancel(pctx) return &RoundTimer{ - mtx: &sync.RWMutex{}, - ctx: ctx, - cancelCtx: cancelCtx, - timer: nil, - done: done, - roundTimeout: RoundTimeout, + mtx: &sync.RWMutex{}, + ctx: ctx, + cancelCtx: cancelCtx, + timer: nil, + done: done, + role: role, + beaconNetwork: beaconNetwork, + timeoutOptions: TimeoutOptions{ + quickThreshold: QuickTimeoutThreshold, + quick: QuickTimeout, + slow: SlowTimeout, + }, } } +// RoundTimeout calculates the timeout duration for a specific role, height, and round. +// +// Timeout Rules: +// - For roles BNRoleAttester and BNRoleSyncCommittee, the base timeout is 1/3 of the slot duration. +// - For roles BNRoleAggregator and BNRoleSyncCommitteeContribution, the base timeout is 2/3 of the slot duration. +// - For role BNRoleProposer, the timeout is either quickTimeout or slowTimeout, depending on the round. +// +// Additional Timeout: +// - For rounds less than or equal to quickThreshold, the additional timeout is 'quick' seconds. +// - For rounds greater than quickThreshold, the additional timeout is 'slow' seconds. +// +// SIP Reference: +// For more details, see SIP at https://github.com/bloxapp/SIPs/pull/22 +// +// TODO: Update SIP for Deterministic Round Timeout +// TODO: Decide if to make the proposer timeout deterministic +// +// Synchronization Note: +// To ensure synchronized timeouts across instances, the timeout is based on the duty start time, +// which is calculated from the slot height. The base timeout is set based on the role, +// and the additional timeout is added based on the round number. 
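+//
+// Worked example (assuming 12-second slots, QuickTimeout = 2s, SlowTimeout = 2m):
+//   - attester, round 2:  base = 12s/3 = 4s, additional = 2*2s = 4s,
+//     so the timer fires 8s after the duty's slot start.
+//   - attester, round 10: base = 4s, additional = 8*2s + 2*2m = 4m16s,
+//     so the timer fires 4m20s after the duty's slot start.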
+func (t *RoundTimer) RoundTimeout(height specqbft.Height, round specqbft.Round) time.Duration { + // Initialize duration to zero + var baseDuration time.Duration + + // Set base duration based on role + switch t.role { + case spectypes.BNRoleAttester, spectypes.BNRoleSyncCommittee: + // third of the slot time + baseDuration = t.beaconNetwork.SlotDurationSec() / 3 + case spectypes.BNRoleAggregator, spectypes.BNRoleSyncCommitteeContribution: + // two-third of the slot time + baseDuration = t.beaconNetwork.SlotDurationSec() / 3 * 2 + default: + if round <= t.timeoutOptions.quickThreshold { + return t.timeoutOptions.quick + } + return t.timeoutOptions.slow + } + + // Calculate additional timeout based on round + var additionalTimeout time.Duration + if round <= t.timeoutOptions.quickThreshold { + additionalTimeout = time.Duration(int(round)) * t.timeoutOptions.quick + } else { + quickPortion := time.Duration(t.timeoutOptions.quickThreshold) * t.timeoutOptions.quick + slowPortion := time.Duration(int(round-t.timeoutOptions.quickThreshold)) * t.timeoutOptions.slow + additionalTimeout = quickPortion + slowPortion + } + + // Combine base duration and additional timeout + timeoutDuration := baseDuration + additionalTimeout + + // Get the start time of the duty + dutyStartTime := t.beaconNetwork.GetSlotStartTime(phase0.Slot(height)) + + // Calculate the time until the duty should start plus the timeout duration + return time.Until(dutyStartTime.Add(timeoutDuration)) +} + // OnTimeout sets a function called on timeout. -func (t *RoundTimer) OnTimeout(done func()) { +func (t *RoundTimer) OnTimeout(done OnRoundTimeoutF) { t.mtx.Lock() // write to t.done defer t.mtx.Unlock() @@ -70,9 +151,10 @@ func (t *RoundTimer) Round() specqbft.Round { } // TimeoutForRound times out for a given round. 
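//
// Construction sketch for the updated timer (ctx, beaconNet and height are
// assumed names, not part of the patch):
//
//	t := roundtimer.New(ctx, beaconNet, spectypes.BNRoleAttester,
//		func(round specqbft.Round) { /* round timed out */ })
//	t.TimeoutForRound(height, specqbft.FirstRound)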
-func (t *RoundTimer) TimeoutForRound(round specqbft.Round) { +func (t *RoundTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { atomic.StoreInt64(&t.round, int64(round)) - timeout := t.roundTimeout(round) + timeout := t.RoundTimeout(height, round) + // preparing the underlying timer timer := t.timer if timer == nil { @@ -101,7 +183,7 @@ func (t *RoundTimer) waitForRound(round specqbft.Round, timeout <-chan time.Time t.mtx.RLock() // read t.done defer t.mtx.RUnlock() if done := t.done; done != nil { - done() + done(round) } }() } diff --git a/protocol/v2/qbft/roundtimer/timer_test.go b/protocol/v2/qbft/roundtimer/timer_test.go index 8c41410db1..25ce776631 100644 --- a/protocol/v2/qbft/roundtimer/timer_test.go +++ b/protocol/v2/qbft/roundtimer/timer_test.go @@ -2,45 +2,167 @@ package roundtimer import ( "context" + "fmt" + "sync" "sync/atomic" "testing" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer/mocks" ) -func TestRoundTimer_TimeoutForRound(t *testing.T) { - t.Run("TimeoutForRound", func(t *testing.T) { - count := int32(0) - onTimeout := func() { - atomic.AddInt32(&count, 1) - } - timer := New(context.Background(), onTimeout) - timer.roundTimeout = func(round specqbft.Round) time.Duration { - return 1100 * time.Millisecond - } - timer.TimeoutForRound(specqbft.Round(1)) - require.Equal(t, int32(0), atomic.LoadInt32(&count)) - <-time.After(timer.roundTimeout(specqbft.Round(1)) + time.Millisecond*10) - require.Equal(t, int32(1), atomic.LoadInt32(&count)) - }) - - t.Run("timeout round before elapsed", func(t *testing.T) { - count := int32(0) - onTimeout := func() { - atomic.AddInt32(&count, 1) - } - timer := New(context.Background(), onTimeout) - timer.roundTimeout = func(round specqbft.Round) time.Duration { - return 1100 * time.Millisecond +func TestTimeoutForRound(t *testing.T) { + roles := []spectypes.BeaconRole{ + spectypes.BNRoleAttester, + spectypes.BNRoleAggregator, + spectypes.BNRoleProposer, + spectypes.BNRoleSyncCommittee, + spectypes.BNRoleSyncCommitteeContribution, + } + + for _, role := range roles { + t.Run(fmt.Sprintf("TimeoutForRound - %s: <= quickTimeoutThreshold", role), func(t *testing.T) { + testTimeoutForRound(t, role, specqbft.Round(1)) + }) + + t.Run(fmt.Sprintf("TimeoutForRound - %s: > quickTimeoutThreshold", role), func(t *testing.T) { + testTimeoutForRound(t, role, specqbft.Round(2)) + }) + + t.Run(fmt.Sprintf("TimeoutForRound - %s: before elapsed", role), func(t *testing.T) { + testTimeoutForRoundElapsed(t, role, specqbft.Round(2)) + }) + + // TODO: Decide if to make the proposer timeout deterministic + // Proposer role is not tested for multiple synchronized timers since it's not deterministic + if role == spectypes.BNRoleProposer { + continue } - timer.TimeoutForRound(specqbft.Round(1)) - <-time.After(timer.roundTimeout(specqbft.Round(1)) / 2) - timer.TimeoutForRound(specqbft.Round(2)) // reset before elapsed - require.Equal(t, int32(0), atomic.LoadInt32(&count)) - <-time.After(timer.roundTimeout(specqbft.Round(2)) + time.Millisecond*10) - require.Equal(t, int32(1), atomic.LoadInt32(&count)) - }) + t.Run(fmt.Sprintf("TimeoutForRound - %s: multiple synchronized timers", role), func(t *testing.T) { + testTimeoutForRoundMulti(t, role, specqbft.Round(1)) + }) + } +} + +func setupMockBeaconNetwork(t *testing.T) 
*mocks.MockBeaconNetwork { + ctrl := gomock.NewController(t) + mockBeaconNetwork := mocks.NewMockBeaconNetwork(ctrl) + + mockBeaconNetwork.EXPECT().SlotDurationSec().Return(120 * time.Millisecond).AnyTimes() + mockBeaconNetwork.EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) time.Time { + return time.Now() + }, + ).AnyTimes() + return mockBeaconNetwork +} + +func setupTimer(mockBeaconNetwork *mocks.MockBeaconNetwork, onTimeout OnRoundTimeoutF, role spectypes.BeaconRole, round specqbft.Round) *RoundTimer { + timer := New(context.Background(), mockBeaconNetwork, role, onTimeout) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: round, + quick: 100 * time.Millisecond, + slow: 200 * time.Millisecond, + } + + return timer +} + +func testTimeoutForRound(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + mockBeaconNetwork := setupMockBeaconNetwork(t) + + count := int32(0) + onTimeout := func(round specqbft.Round) { + atomic.AddInt32(&count, 1) + } + + timer := setupTimer(mockBeaconNetwork, onTimeout, role, threshold) + + timer.TimeoutForRound(specqbft.FirstHeight, threshold) + require.Equal(t, int32(0), atomic.LoadInt32(&count)) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, threshold) + time.Millisecond*10) + require.Equal(t, int32(1), atomic.LoadInt32(&count)) +} + +func testTimeoutForRoundElapsed(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + mockBeaconNetwork := setupMockBeaconNetwork(t) + + count := int32(0) + onTimeout := func(round specqbft.Round) { + atomic.AddInt32(&count, 1) + } + + timer := setupTimer(mockBeaconNetwork, onTimeout, role, threshold) + + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.FirstRound) / 2) + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.Round(2)) // reset before elapsed + require.Equal(t, int32(0), atomic.LoadInt32(&count)) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.Round(2)) + time.Millisecond*10) + require.Equal(t, int32(1), atomic.LoadInt32(&count)) +} + +func testTimeoutForRoundMulti(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + ctrl := gomock.NewController(t) + mockBeaconNetwork := mocks.NewMockBeaconNetwork(ctrl) + + var count int32 + var timestamps = make([]int64, 4) + var mu sync.Mutex + + onTimeout := func(index int) { + atomic.AddInt32(&count, 1) + mu.Lock() + timestamps[index] = time.Now().UnixNano() + mu.Unlock() + } + + timeNow := time.Now() + mockBeaconNetwork.EXPECT().SlotDurationSec().Return(100 * time.Millisecond).AnyTimes() + mockBeaconNetwork.EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) time.Time { + return timeNow + }, + ).AnyTimes() + + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + wg.Add(1) + go func(index int) { + timer := New(context.Background(), mockBeaconNetwork, role, func(round specqbft.Round) { onTimeout(index) }) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: threshold, + quick: 100 * time.Millisecond, + } + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound) + wg.Done() + }(i) + time.Sleep(time.Millisecond * 10) // Introduce a sleep between creating timers + } + + wg.Wait() // Wait for all go-routines to finish + + timer := New(context.Background(), mockBeaconNetwork, role, nil) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: specqbft.Round(1), + quick: 100 * time.Millisecond, + } + + // Wait a bit more than the expected timeout to 
ensure all timers have triggered + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.FirstRound) + time.Millisecond*100) + + require.Equal(t, int32(4), atomic.LoadInt32(&count), "All four timers should have triggered") + + mu.Lock() + for i := 1; i < 4; i++ { + require.InDelta(t, timestamps[0], timestamps[i], float64(time.Millisecond*10), "All four timers should expire nearly at the same time") + } + mu.Unlock() } diff --git a/protocol/v2/qbft/spectest/controller_sync_type.go b/protocol/v2/qbft/spectest/controller_sync_type.go deleted file mode 100644 index 08fc7b2332..0000000000 --- a/protocol/v2/qbft/spectest/controller_sync_type.go +++ /dev/null @@ -1,55 +0,0 @@ -package qbft - -import ( - "encoding/hex" - "testing" - - qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" - "github.com/bloxapp/ssv/protocol/v2/types" - - "github.com/bloxapp/ssv-spec/qbft/spectest/tests/controller/futuremsg" - spectypes "github.com/bloxapp/ssv-spec/types" - spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/logging/fields" - "github.com/stretchr/testify/require" -) - -func RunControllerSync(t *testing.T, test *futuremsg.ControllerSyncSpecTest) { - logger := logging.TestLogger(t) - identifier := spectypes.NewMsgID(types.GetDefaultDomain(), spectestingutils.TestingValidatorPubKey[:], spectypes.BNRoleAttester) - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), identifier.GetRoleType()) - contr := qbfttesting.NewTestingQBFTController( - identifier[:], - spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), - config, - false, - ) - - err := contr.StartNewInstance(logger, 0, []byte{1, 2, 3, 4}) - if err != nil { - t.Fatalf(err.Error()) - } - - var lastErr error - for _, msg := range test.InputMessages { - logger = logger.With(fields.Height(msg.Message.Height)) - _, err := contr.ProcessMsg(logger, msg) - if err != nil { - lastErr = err - } - } - - syncedDecidedCnt := config.GetNetwork().(*spectestingutils.TestingNetwork).SyncHighestDecidedCnt - require.EqualValues(t, test.SyncDecidedCalledCnt, syncedDecidedCnt) - - r, err := contr.GetRoot() - require.NoError(t, err) - require.EqualValues(t, test.ControllerPostRoot, hex.EncodeToString(r[:])) - - if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) - } else { - require.NoError(t, lastErr) - } -} diff --git a/protocol/v2/qbft/spectest/controller_type.go b/protocol/v2/qbft/spectest/controller_type.go index 0d32a545c2..a919cc104b 100644 --- a/protocol/v2/qbft/spectest/controller_type.go +++ b/protocol/v2/qbft/spectest/controller_type.go @@ -3,6 +3,10 @@ package qbft import ( "bytes" "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" "reflect" "testing" @@ -10,29 +14,32 @@ import ( spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { + //temporary 
to override state comparisons from file not inputted one + overrideStateComparisonForControllerSpecTest(t, test) + logger := logging.TestLogger(t) - identifier := []byte{1, 2, 3, 4} - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.BNRoleAttester) - contr := qbfttesting.NewTestingQBFTController( - identifier[:], - spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), - config, - false, - ) + contr := generateController(logger) var lastErr error for i, runData := range test.RunInstanceData { - if err := runInstanceWithData(t, logger, specqbft.Height(i), contr, config, identifier, runData); err != nil { + height := specqbft.Height(i) + if runData.Height != nil { + height = *runData.Height + } + if err := runInstanceWithData(t, logger, height, contr, runData); err != nil { lastErr = err } } @@ -44,13 +51,24 @@ func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { } } +func generateController(logger *zap.Logger) *controller.Controller { + identifier := []byte{1, 2, 3, 4} + config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.BNRoleAttester) + return qbfttesting.NewTestingQBFTController( + identifier[:], + spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), + config, + false, + ) +} + func testTimer( t *testing.T, config *qbft.Config, runData *spectests.RunInstanceData, ) { if runData.ExpectedTimerState != nil { - if timer, ok := config.GetTimer().(*spectestingutils.TestQBFTTimer); ok { + if timer, ok := config.GetTimer().(*roundtimer.TestQBFTTimer); ok { require.Equal(t, runData.ExpectedTimerState.Timeouts, timer.State.Timeouts) require.Equal(t, runData.ExpectedTimerState.Round, timer.State.Round) } @@ -79,13 +97,6 @@ func testProcessMsg( } require.EqualValues(t, runData.ExpectedDecidedState.DecidedCnt, decidedCnt, lastErr) - // verify sync decided by range calls - if runData.ExpectedDecidedState.CalledSyncDecidedByRange { - require.EqualValues(t, runData.ExpectedDecidedState.DecidedByRangeValues, config.GetNetwork().(*spectestingutils.TestingNetwork).DecidedByRange) - } else { - require.EqualValues(t, [2]specqbft.Height{0, 0}, config.GetNetwork().(*spectestingutils.TestingNetwork).DecidedByRange) - } - return lastErr } @@ -129,20 +140,20 @@ func testBroadcastedDecided( } } -func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Height, contr *controller.Controller, config *qbft.Config, identifier []byte, runData *spectests.RunInstanceData) error { +func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Height, contr *controller.Controller, runData *spectests.RunInstanceData) error { err := contr.StartNewInstance(logger, height, runData.InputValue) var lastErr error if err != nil { lastErr = err } - testTimer(t, config, runData) + testTimer(t, contr.GetConfig().(*qbft.Config), runData) - if err := testProcessMsg(t, logger, contr, config, runData); err != nil { + if err := testProcessMsg(t, logger, contr, contr.GetConfig().(*qbft.Config), runData); err != nil { lastErr = err } - testBroadcastedDecided(t, config, identifier, runData) + testBroadcastedDecided(t, contr.GetConfig().(*qbft.Config), contr.Identifier, runData) // test root r, err := contr.GetRoot() @@ -151,3 +162,24 @@ func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Heigh return lastErr } + +func overrideStateComparisonForControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { + specDir, err := 
protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest")) + require.NoError(t, err) + specDir = filepath.Join(specDir, "generate") + dir := typescomparable.GetSCDir(specDir, reflect.TypeOf(test).String()) + path := filepath.Join(dir, fmt.Sprintf("%s.json", test.TestName())) + byteValue, err := os.ReadFile(filepath.Clean(path)) + require.NoError(t, err) + sc := make([]*controller.Controller, len(test.RunInstanceData)) + require.NoError(t, json.Unmarshal(byteValue, &sc)) + + for i, runData := range test.RunInstanceData { + runData.ControllerPostState = sc[i] + + r, err := sc[i].GetRoot() + require.NoError(t, err) + + runData.ControllerPostRoot = hex.EncodeToString(r[:]) + } +} diff --git a/protocol/v2/qbft/spectest/msg_processing_type.go b/protocol/v2/qbft/spectest/msg_processing_type.go index 63c8922862..15606c2ece 100644 --- a/protocol/v2/qbft/spectest/msg_processing_type.go +++ b/protocol/v2/qbft/spectest/msg_processing_type.go @@ -3,6 +3,8 @@ package qbft import ( "encoding/hex" "fmt" + "path/filepath" + "reflect" "testing" "time" @@ -10,15 +12,19 @@ import ( spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" "github.com/stretchr/testify/require" ) // RunMsgProcessing processes MsgProcessingSpecTest. It probably may be removed. func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { + overrideStateComparisonForMsgProcessingSpecTest(t, test) + // a little trick we do to instantiate all the internal instance params preByts, _ := test.Pre.Encode() msgId := specqbft.ControllerIdToMessageID(test.Pre.State.ID) @@ -49,7 +55,7 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { } if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) + require.EqualError(t, lastErr, test.ExpectedError, "expected %v, but got %v", test.ExpectedError, lastErr) } else { require.NoError(t, lastErr) } @@ -78,3 +84,22 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { require.EqualValues(t, test.PostRoot, hex.EncodeToString(postRoot[:]), "post root not valid") } + +func overrideStateComparisonForMsgProcessingSpecTest(t *testing.T, test *spectests.MsgProcessingSpecTest) { + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest")) + require.NoError(t, err) + test.PostState, err = typescomparable.UnmarshalStateComparison(specDir, test.TestName(), + reflect.TypeOf(test).String(), + &specqbft.State{}) + require.NoError(t, err) + + r, err := test.PostState.GetRoot() + require.NoError(t, err) + + // backwards compatability test, hard coded post root must be equal to the one loaded from file + if len(test.PostRoot) > 0 { + require.EqualValues(t, test.PostRoot, hex.EncodeToString(r[:])) + } + + test.PostRoot = hex.EncodeToString(r[:]) +} diff --git a/protocol/v2/qbft/spectest/qbft_mapping_test.go b/protocol/v2/qbft/spectest/qbft_mapping_test.go index d771e98d1f..00903a0adc 100644 --- a/protocol/v2/qbft/spectest/qbft_mapping_test.go +++ b/protocol/v2/qbft/spectest/qbft_mapping_test.go @@ -8,13 +8,13 @@ import ( "testing" spectests 
"github.com/bloxapp/ssv-spec/qbft/spectest/tests" - "github.com/bloxapp/ssv-spec/qbft/spectest/tests/controller/futuremsg" "github.com/bloxapp/ssv-spec/qbft/spectest/tests/timeout" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/logging" testing2 "github.com/bloxapp/ssv/protocol/v2/qbft/testing" - "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" @@ -31,18 +31,12 @@ func TestQBFTMapping(t *testing.T) { panic(err.Error()) } - origDomain := types.GetDefaultDomain() types.SetDefaultDomain(testingutils.TestingSSVDomainType) - defer func() { - types.SetDefaultDomain(origDomain) - }() for name, test := range untypedTests { name, test := name, test - testName := strings.Split(name, "_")[1] testType := strings.Split(name, "_")[0] - switch testType { case reflect.TypeOf(&spectests.MsgProcessingSpecTest{}).String(): byts, err := json.Marshal(test) @@ -51,6 +45,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunMsgProcessing(t, typedTest) }) case reflect.TypeOf(&spectests.MsgSpecTest{}).String(): @@ -60,6 +55,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunMsg(t, typedTest) }) case reflect.TypeOf(&spectests.ControllerSpecTest{}).String(): @@ -69,6 +65,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunControllerSpecTest(t, typedTest) }) case reflect.TypeOf(&spectests.CreateMsgSpecTest{}).String(): @@ -78,6 +75,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunCreateMsg(t, typedTest) }) case reflect.TypeOf(&spectests.RoundRobinSpecTest{}).String(): @@ -87,21 +85,12 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { // using only spec struct so no need to run our version (TODO: check how we choose leader) + t.Parallel() typedTest.Run(t) }) /*t.Run(typedTest.TestName(), func(t *testing.T) { RunMsg(t, typedTest) })*/ - - case reflect.TypeOf(&futuremsg.ControllerSyncSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &futuremsg.ControllerSyncSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { - RunControllerSync(t, typedTest) - }) case reflect.TypeOf(&timeout.SpecTest{}).String(): byts, err := json.Marshal(test) require.NoError(t, err) diff --git a/protocol/v2/qbft/spectest/timeout_type.go b/protocol/v2/qbft/spectest/timeout_type.go index 637e1dd374..73b3fe7cde 100644 --- a/protocol/v2/qbft/spectest/timeout_type.go +++ b/protocol/v2/qbft/spectest/timeout_type.go @@ -7,8 +7,11 @@ import ( "github.com/bloxapp/ssv-spec/qbft" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + "github.com/stretchr/testify/require" ) @@ -32,7 +35,7 @@ func RunTimeout(t *testing.T, test *SpecTest) { } // test calling timeout - timer, ok := 
test.Pre.GetConfig().GetTimer().(*testingutils.TestQBFTTimer) + timer, ok := test.Pre.GetConfig().GetTimer().(*roundtimer.TestQBFTTimer) require.True(t, ok) require.Equal(t, test.ExpectedTimerState.Timeouts, timer.State.Timeouts) require.Equal(t, test.ExpectedTimerState.Round, timer.State.Round) diff --git a/protocol/v2/qbft/testing/utils.go b/protocol/v2/qbft/testing/utils.go index 35291f0acc..c6741925ce 100644 --- a/protocol/v2/qbft/testing/utils.go +++ b/protocol/v2/qbft/testing/utils.go @@ -6,10 +6,13 @@ import ( specqbft "github.com/bloxapp/ssv-spec/qbft" "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" - "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/qbft" + "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" ) var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, role types.BeaconRole) *qbft.Config { @@ -31,9 +34,10 @@ var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, ro ProposerF: func(state *specqbft.State, round specqbft.Round) types.OperatorID { return 1 }, - Storage: TestingStores(logger).Get(role), - Network: testingutils.NewTestingNetwork(), - Timer: testingutils.NewTestingTimer(), + Storage: TestingStores(logger).Get(role), + Network: testingutils.NewTestingNetwork(), + Timer: roundtimer.NewTestingTimer(), + SignatureVerification: true, } } diff --git a/protocol/v2/queue/worker/message_worker.go b/protocol/v2/queue/worker/message_worker.go index ee96301870..5c9f2b3f97 100644 --- a/protocol/v2/queue/worker/message_worker.go +++ b/protocol/v2/queue/worker/message_worker.go @@ -2,11 +2,12 @@ package worker import ( "context" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) var ( @@ -24,12 +25,12 @@ func init() { } // MsgHandler func that receive message.SSVMessage to handle -type MsgHandler func(msg *spectypes.SSVMessage) error +type MsgHandler func(msg *queue.DecodedSSVMessage) error // ErrorHandler func that handles an error for a specific message -type ErrorHandler func(msg *spectypes.SSVMessage, err error) error +type ErrorHandler func(msg *queue.DecodedSSVMessage, err error) error -func defaultErrHandler(msg *spectypes.SSVMessage, err error) error { +func defaultErrHandler(msg *queue.DecodedSSVMessage, err error) error { return err } @@ -46,7 +47,7 @@ type Worker struct { ctx context.Context cancel context.CancelFunc workersCount int - queue chan *spectypes.SSVMessage + queue chan *queue.DecodedSSVMessage handler MsgHandler errHandler ErrorHandler metricsPrefix string @@ -60,7 +61,7 @@ func NewWorker(logger *zap.Logger, cfg *Config) *Worker { ctx: ctx, cancel: cancel, workersCount: cfg.WorkersCount, - queue: make(chan *spectypes.SSVMessage, cfg.Buffer), + queue: make(chan *queue.DecodedSSVMessage, cfg.Buffer), errHandler: defaultErrHandler, metricsPrefix: cfg.MetrixPrefix, } @@ -78,7 +79,7 @@ func (w *Worker) init(logger *zap.Logger) { } // startWorker process functionality -func (w *Worker) startWorker(logger *zap.Logger, ch <-chan *spectypes.SSVMessage) { +func (w *Worker) startWorker(logger *zap.Logger, ch <-chan *queue.DecodedSSVMessage) { ctx, cancel := context.WithCancel(w.ctx) defer cancel() for { @@ -104,7 +105,7 @@ 
func (w *Worker) UseErrorHandler(errHandler ErrorHandler) { // TryEnqueue tries to enqueue a job to the given job channel. Returns true if // the operation was successful, and false if enqueuing would not have been // possible without blocking. Job is not enqueued in the latter case. -func (w *Worker) TryEnqueue(msg *spectypes.SSVMessage) bool { +func (w *Worker) TryEnqueue(msg *queue.DecodedSSVMessage) bool { select { case w.queue <- msg: return true @@ -125,7 +126,7 @@ func (w *Worker) Size() int { } // process the msg's from queue -func (w *Worker) process(logger *zap.Logger, msg *spectypes.SSVMessage) { +func (w *Worker) process(logger *zap.Logger, msg *queue.DecodedSSVMessage) { if w.handler == nil { logger.Warn("❗ no handler for worker") return diff --git a/protocol/v2/queue/worker/message_worker_test.go b/protocol/v2/queue/worker/message_worker_test.go index b5cec21317..adbf5032d0 100644 --- a/protocol/v2/queue/worker/message_worker_test.go +++ b/protocol/v2/queue/worker/message_worker_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func TestWorker(t *testing.T) { @@ -20,12 +20,12 @@ func TestWorker(t *testing.T) { Buffer: 2, }) - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) return nil }) for i := 0; i < 5; i++ { - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) time.Sleep(time.Second * 1) } } @@ -41,7 +41,7 @@ func TestManyWorkers(t *testing.T) { }) time.Sleep(time.Millisecond * 100) // wait for worker to start listen - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) wg.Done() return nil @@ -49,7 +49,7 @@ func TestManyWorkers(t *testing.T) { for i := 0; i < 10; i++ { wg.Add(1) - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) } wg.Wait() } @@ -65,7 +65,7 @@ func TestBuffer(t *testing.T) { }) time.Sleep(time.Millisecond * 100) // wait for worker to start listen - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) wg.Done() time.Sleep(time.Millisecond * 100) @@ -74,7 +74,7 @@ func TestBuffer(t *testing.T) { for i := 0; i < 11; i++ { // should buffer 10 msgs wg.Add(1) - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) } wg.Wait() } diff --git a/protocol/v2/ssv/queue/message_prioritizer_test.go b/protocol/v2/ssv/queue/message_prioritizer_test.go index f07e5e2691..deb3654b45 100644 --- a/protocol/v2/ssv/queue/message_prioritizer_test.go +++ b/protocol/v2/ssv/queue/message_prioritizer_test.go @@ -17,7 +17,6 @@ import ( "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) var messagePriorityTests = []struct { @@ -125,7 +124,7 @@ func TestMessagePrioritizer(t *testing.T) { messages := make(messageSlice, len(test.messages)) for i, m := range test.messages { var err error - messages[i], err = DecodeSSVMessage(zap.L(), m.ssvMessage(test.state)) + messages[i], err = DecodeSSVMessage(m.ssvMessage(test.state)) 
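// Note: DecodeSSVMessage no longer takes a *zap.Logger. Decoding either
// succeeds or returns an error (including the new ErrUnknownMessageType for
// an unrecognized MsgType, introduced in messages.go below), so callers now
// handle or propagate failures instead of relying on the decoder to log them.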
require.NoError(t, err) } diff --git a/protocol/v2/ssv/queue/messages.go b/protocol/v2/ssv/queue/messages.go index 01c6fb945c..f69644eee7 100644 --- a/protocol/v2/ssv/queue/messages.go +++ b/protocol/v2/ssv/queue/messages.go @@ -1,25 +1,31 @@ package queue import ( + "fmt" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/pkg/errors" - "go.uber.org/zap" ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" ) +var ( + ErrUnknownMessageType = fmt.Errorf("unknown message type") +) + // DecodedSSVMessage is a bundle of SSVMessage and it's decoding. +// TODO: try to make it generic type DecodedSSVMessage struct { *spectypes.SSVMessage // Body is the decoded Data. - Body interface{} // *SignedMessage | *SignedPartialSignatureMessage + Body interface{} // *SignedMessage | *SignedPartialSignatureMessage | *EventMsg } // DecodeSSVMessage decodes an SSVMessage and returns a DecodedSSVMessage. -func DecodeSSVMessage(logger *zap.Logger, m *spectypes.SSVMessage) (*DecodedSSVMessage, error) { +func DecodeSSVMessage(m *spectypes.SSVMessage) (*DecodedSSVMessage, error) { var body interface{} switch m.MsgType { case spectypes.SSVConsensusMsgType: // TODO: Or message.SSVDecidedMsgType? @@ -40,6 +46,8 @@ func DecodeSSVMessage(logger *zap.Logger, m *spectypes.SSVMessage) (*DecodedSSVM return nil, errors.Wrap(err, "failed to decode EventMsg") } body = msg + default: + return nil, ErrUnknownMessageType } return &DecodedSSVMessage{ SSVMessage: m, diff --git a/protocol/v2/ssv/queue/metrics.go b/protocol/v2/ssv/queue/metrics.go index 99d3c30ad3..36206704cc 100644 --- a/protocol/v2/ssv/queue/metrics.go +++ b/protocol/v2/ssv/queue/metrics.go @@ -1,14 +1,12 @@ package queue import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" + spectypes "github.com/bloxapp/ssv-spec/types" ) // Metrics records metrics about the Queue. type Metrics interface { - // Dropped increments the number of messages dropped from the Queue. - Dropped() + DroppedQueueMessage(messageID spectypes.MessageID) } type queueWithMetrics struct { @@ -27,35 +25,8 @@ func WithMetrics(q Queue, metrics Metrics) Queue { func (q *queueWithMetrics) TryPush(msg *DecodedSSVMessage) bool { pushed := q.Queue.TryPush(msg) if !pushed { - q.metrics.Dropped() + q.metrics.DroppedQueueMessage(msg.GetID()) } - return pushed -} - -// TODO: move to metrics/prometheus package -type prometheusMetrics struct { - dropped prometheus.Counter -} - -// NewPrometheusMetrics returns a Prometheus implementation of Metrics. -func NewPrometheusMetrics(messageID string) Metrics { - return &prometheusMetrics{ - dropped: metricMessageDropped.WithLabelValues(messageID), - } -} - -func (m *prometheusMetrics) Dropped() { - m.dropped.Inc() -} -// Register Prometheus metrics. 
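// With prometheusMetrics and its counter removed below, the queue package no
// longer owns any Prometheus state; it only calls Metrics.DroppedQueueMessage
// on the interface supplied by the consumer. A minimal consumer-side
// implementation might look like this sketch (illustrative only, not part of
// this patch; assumes the usual prometheus client and spectypes imports):
//
//	type droppedCounter struct{ dropped prometheus.Counter }
//
//	func (c *droppedCounter) DroppedQueueMessage(id spectypes.MessageID) {
//		c.dropped.Inc()
//	}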
-var ( - metricMessageDropped = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "ssv:ibft:msgq:drops", - Help: "The amount of message dropped from the validator's msg queue", - }, []string{"msg_id"}) -) - -func init() { - _ = prometheus.Register(metricMessageDropped) + return pushed } diff --git a/protocol/v2/ssv/queue/queue_test.go b/protocol/v2/ssv/queue/queue_test.go index a835779566..4b46c0e045 100644 --- a/protocol/v2/ssv/queue/queue_test.go +++ b/protocol/v2/ssv/queue/queue_test.go @@ -10,8 +10,8 @@ import ( "time" "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" - "go.uber.org/zap" "golang.org/x/text/language" "golang.org/x/text/message" ) @@ -109,7 +109,7 @@ func TestPriorityQueue_Pop(t *testing.T) { queue := New(capacity) require.True(t, queue.Empty()) - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(t, err) // Push messages. @@ -163,7 +163,7 @@ func TestPriorityQueue_Order(t *testing.T) { // Decode messages. messages := make(messageSlice, len(test.messages)) for i, m := range test.messages { - mm, err := DecodeSSVMessage(zap.L(), m.ssvMessage(test.state)) + mm, err := DecodeSSVMessage(m.ssvMessage(test.state)) require.NoError(t, err) messages[i] = mm } @@ -184,30 +184,32 @@ func TestPriorityQueue_Order(t *testing.T) { } } -type mockMetrics struct { - dropped int +type testMetrics struct { + dropped atomic.Uint64 } -func (m *mockMetrics) Dropped() { m.dropped++ } +func (n *testMetrics) DroppedQueueMessage(messageID spectypes.MessageID) { + n.dropped.Add(1) +} func TestWithMetrics(t *testing.T) { - var metrics mockMetrics - queue := WithMetrics(New(1), &metrics) + metrics := &testMetrics{} + queue := WithMetrics(New(1), metrics) require.True(t, queue.Empty()) // Push 1 message. - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(t, err) pushed := queue.TryPush(msg) require.True(t, pushed) require.False(t, queue.Empty()) - require.Equal(t, 0, metrics.dropped) + require.EqualValues(t, 0, metrics.dropped.Load()) // Push above capacity. 
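// The queue was created with New(1) and already holds one message, so the
// TryPush below fails; queueWithMetrics reports the drop through
// Metrics.DroppedQueueMessage, which testMetrics counts with an atomic.Uint64.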
pushed = queue.TryPush(msg) require.False(t, pushed) require.False(t, queue.Empty()) - require.Equal(t, 1, metrics.dropped) + require.EqualValues(t, 1, metrics.dropped.Load()) } func BenchmarkPriorityQueue_Parallel(b *testing.B) { @@ -234,7 +236,7 @@ func benchmarkPriorityQueueParallel(b *testing.B, factory func() Queue, lossy bo messages := make([]*DecodedSSVMessage, messageCount) for i := range messages { var err error - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: qbft.Height(rand.Intn(messageCount)), Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: qbft.Height(rand.Intn(messageCount)), Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(b, err) messages[i] = msg } @@ -359,7 +361,7 @@ func BenchmarkPriorityQueue_Concurrent(b *testing.B) { for _, i := range rand.Perm(messageCount) { height := qbft.FirstHeight + qbft.Height(i) for _, t := range types { - decoded, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: height, Type: t}.ssvMessage(mockState)) + decoded, err := DecodeSSVMessage(mockConsensusMessage{Height: height, Type: t}.ssvMessage(mockState)) require.NoError(b, err) msgs <- decoded } @@ -412,7 +414,7 @@ func BenchmarkPriorityQueue_Concurrent(b *testing.B) { } func decodeAndPush(t require.TestingT, queue Queue, msg mockMessage, state *State) *DecodedSSVMessage { - decoded, err := DecodeSSVMessage(zap.L(), msg.ssvMessage(state)) + decoded, err := DecodeSSVMessage(msg.ssvMessage(state)) require.NoError(t, err) queue.Push(decoded) return decoded diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index b59d404907..1fc2225e15 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -54,7 +54,8 @@ type BaseRunner struct { BeaconRoleType spectypes.BeaconRole // implementation vars - TimeoutF TimeoutF `json:"-"` + TimeoutF TimeoutF `json:"-"` + VerifySignatures bool `json:"-"` // highestDecidedSlot holds the highest decided duty slot and gets updated after each decided is reached highestDecidedSlot spec.Slot @@ -96,6 +97,9 @@ func NewBaseRunner( // baseStartNewDuty is a base func that all runner implementation can call to start a duty func (b *BaseRunner) baseStartNewDuty(logger *zap.Logger, runner Runner, duty *spectypes.Duty) error { + if err := b.ShouldProcessDuty(duty); err != nil { + return errors.Wrap(err, "can't start duty") + } b.baseSetupForNewDuty(duty) return runner.executeDuty(logger, duty) } @@ -265,3 +269,11 @@ func (b *BaseRunner) hasRunningDuty() bool { } return !b.State.Finished } + +func (b *BaseRunner) ShouldProcessDuty(duty *spectypes.Duty) error { + if b.QBFTController.Height >= specqbft.Height(duty.Slot) { + return errors.Errorf("duty for slot %d already passed. 
Current height is %d", duty.Slot, + b.QBFTController.Height) + } + return nil +} diff --git a/protocol/v2/ssv/runner/runner_signatures.go b/protocol/v2/ssv/runner/runner_signatures.go index 96b2a723f5..edfc608ea7 100644 --- a/protocol/v2/ssv/runner/runner_signatures.go +++ b/protocol/v2/ssv/runner/runner_signatures.go @@ -3,10 +3,11 @@ package runner import ( spec "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/protocol/v2/types" ssz "github.com/ferranbt/fastssz" "github.com/herumi/bls-eth-go-binary/bls" "github.com/pkg/errors" + + "github.com/bloxapp/ssv/protocol/v2/types" ) func (b *BaseRunner) signBeaconObject( @@ -57,13 +58,15 @@ func (b *BaseRunner) validatePartialSigMsgForSlot( return errors.New("invalid partial sig slot") } - if err := types.VerifyByOperators(signedMsg.GetSignature(), signedMsg, b.Share.DomainType, spectypes.PartialSignatureType, b.Share.Committee); err != nil { - return errors.Wrap(err, "failed to verify PartialSignature") - } + if b.VerifySignatures { + if err := types.VerifyByOperators(signedMsg.GetSignature(), signedMsg, b.Share.DomainType, spectypes.PartialSignatureType, b.Share.Committee); err != nil { + return errors.Wrap(err, "failed to verify PartialSignature") + } - for _, msg := range signedMsg.Message.Messages { - if err := b.verifyBeaconPartialSignature(msg); err != nil { - return errors.Wrap(err, "could not verify Beacon partial Signature") + for _, msg := range signedMsg.Message.Messages { + if err := b.verifyBeaconPartialSignature(msg); err != nil { + return errors.Wrap(err, "could not verify Beacon partial Signature") + } } } diff --git a/protocol/v2/ssv/runner/timer.go b/protocol/v2/ssv/runner/timer.go index 9d8e4a315f..51e25ccbf6 100644 --- a/protocol/v2/ssv/runner/timer.go +++ b/protocol/v2/ssv/runner/timer.go @@ -9,7 +9,7 @@ import ( "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" ) -type TimeoutF func(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) func() +type TimeoutF func(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) roundtimer.OnRoundTimeoutF func (b *BaseRunner) registerTimeoutHandler(logger *zap.Logger, instance *instance.Instance, height specqbft.Height) { identifier := spectypes.MessageIDFromBytes(instance.State.ID) diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index 10bf6a39fe..68bc4351b8 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -2,6 +2,7 @@ package runner import ( "crypto/sha256" + "encoding/hex" "encoding/json" v1 "github.com/attestantio/go-eth2-client/api/v1" @@ -53,7 +54,13 @@ func NewValidatorRegistrationRunner( } func (r *ValidatorRegistrationRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - return r.BaseRunner.baseStartNewDuty(logger, r, duty) + // Note: Unlike the other runners, this doesn't call BaseRunner.baseStartNewDuty because + // that requires a QBFTController which ValidatorRegistrationRunner doesn't have. 
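	// (baseStartNewDuty now runs ShouldProcessDuty, which reads
	// b.QBFTController.Height and would panic on a nil controller.)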
+	if r.HasRunningDuty() {
+		return errors.New("already running duty")
+	}
+	r.BaseRunner.baseSetupForNewDuty(duty)
+	return r.executeDuty(logger, duty)
 }
 
 // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil)
@@ -85,7 +92,9 @@ func (r *ValidatorRegistrationRunner) ProcessPreConsensus(logger *zap.Logger, si
 		return errors.Wrap(err, "could not submit validator registration")
 	}
 
-	logger.Debug("validator registration submitted successfully", fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:]))
+	logger.Debug("validator registration submitted successfully",
+		fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:]),
+		zap.String("signature", hex.EncodeToString(specSig[:])))
 
 	r.GetState().Finished = true
 	return nil
diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go
new file mode 100644
index 0000000000..7eba30c616
--- /dev/null
+++ b/protocol/v2/ssv/runner/voluntary_exit.go
@@ -0,0 +1,232 @@
+package runner
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	specqbft "github.com/bloxapp/ssv-spec/qbft"
+	specssv "github.com/bloxapp/ssv-spec/ssv"
+	spectypes "github.com/bloxapp/ssv-spec/types"
+	"github.com/bloxapp/ssv/protocol/v2/ssv/runner/metrics"
+	ssz "github.com/ferranbt/fastssz"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+)
+
+// Duty runner for validator voluntary exit duty
+type VoluntaryExitRunner struct {
+	BaseRunner *BaseRunner
+
+	beacon   specssv.BeaconNode
+	network  specssv.Network
+	signer   spectypes.KeyManager
+	valCheck specqbft.ProposedValueCheckF
+
+	voluntaryExit *phase0.VoluntaryExit
+
+	metrics metrics.ConsensusMetrics
+}
+
+func NewVoluntaryExitRunner(
+	beaconNetwork spectypes.BeaconNetwork,
+	share *spectypes.Share,
+	beacon specssv.BeaconNode,
+	network specssv.Network,
+	signer spectypes.KeyManager,
+) Runner {
+	return &VoluntaryExitRunner{
+		BaseRunner: &BaseRunner{
+			BeaconRoleType: spectypes.BNRoleVoluntaryExit,
+			BeaconNetwork:  beaconNetwork,
+			Share:          share,
+		},
+
+		beacon:  beacon,
+		network: network,
+		signer:  signer,
+		metrics: metrics.NewConsensusMetrics(spectypes.BNRoleVoluntaryExit),
+	}
+}
+
+func (r *VoluntaryExitRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error {
+	// Note: Unlike the other runners, this doesn't call BaseRunner.baseStartNewDuty because
+	// that requires a QBFTController which VoluntaryExitRunner doesn't have.
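	// (Same rationale as ValidatorRegistrationRunner.StartNewDuty above: the
	// ShouldProcessDuty check inside baseStartNewDuty needs a QBFTController.)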
+	if r.HasRunningDuty() {
+		return errors.New("already running duty")
+	}
+	r.BaseRunner.baseSetupForNewDuty(duty)
+	return r.executeDuty(logger, duty)
+}
+
+// HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil)
+func (r *VoluntaryExitRunner) HasRunningDuty() bool {
+	return r.BaseRunner.hasRunningDuty()
+}
+
+// ProcessPreConsensus checks for a quorum of partial signatures over VoluntaryExit and,
+// if a quorum is reached, constructs a SignedVoluntaryExit and submits it to the BeaconNode
+func (r *VoluntaryExitRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error {
+	quorum, roots, err := r.BaseRunner.basePreConsensusMsgProcessing(r, signedMsg)
+	if err != nil {
+		return errors.Wrap(err, "failed processing voluntary exit message")
+	}
+
+	// quorum returns true only once (first time quorum achieved)
+	if !quorum {
+		return nil
+	}
+
+	// only 1 root, verified in basePreConsensusMsgProcessing
+	root := roots[0]
+	fullSig, err := r.GetState().ReconstructBeaconSig(r.GetState().PreConsensusContainer, root, r.GetShare().ValidatorPubKey)
+	if err != nil {
+		return errors.Wrap(err, "could not reconstruct voluntary exit sig")
+	}
+	specSig := phase0.BLSSignature{}
+	copy(specSig[:], fullSig)
+
+	// create SignedVoluntaryExit using the VoluntaryExit created in r.executeDuty() and the reconstructed signature
+	signedVoluntaryExit := &phase0.SignedVoluntaryExit{
+		Message:   r.voluntaryExit,
+		Signature: specSig,
+	}
+
+	if err := r.beacon.SubmitVoluntaryExit(signedVoluntaryExit, specSig); err != nil {
+		return errors.Wrap(err, "could not submit voluntary exit")
+	}
+
+	r.GetState().Finished = true
+	return nil
+}
+
+func (r *VoluntaryExitRunner) ProcessConsensus(logger *zap.Logger, signedMsg *specqbft.SignedMessage) error {
+	return errors.New("no consensus phase for voluntary exit")
+}
+
+func (r *VoluntaryExitRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error {
+	return errors.New("no post consensus phase for voluntary exit")
+}
+
+func (r *VoluntaryExitRunner) expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) {
+	vr, err := r.calculateVoluntaryExit()
+	if err != nil {
+		return nil, spectypes.DomainError, errors.Wrap(err, "could not calculate voluntary exit")
+	}
+	return []ssz.HashRoot{vr}, spectypes.DomainVoluntaryExit, nil
+}
+
+// expectedPostConsensusRootsAndDomain an INTERNAL function, returns the expected post-consensus roots to sign
+func (r *VoluntaryExitRunner) expectedPostConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) {
+	return nil, [4]byte{}, errors.New("no post consensus roots for voluntary exit")
+}
+
+// Validator voluntary exit duty doesn't need consensus or post-consensus.
+// It just performs pre-consensus with VoluntaryExitPartialSig over
+// a VoluntaryExit object to create a SignedVoluntaryExit
+func (r *VoluntaryExitRunner) executeDuty(logger *zap.Logger, duty *spectypes.Duty) error {
+	voluntaryExit, err := r.calculateVoluntaryExit()
+	if err != nil {
+		return errors.Wrap(err, "could not calculate voluntary exit")
+	}
+
+	// get PartialSignatureMessage with voluntaryExit root and signature
+	msg, err := r.BaseRunner.signBeaconObject(r, voluntaryExit, duty.Slot, spectypes.DomainVoluntaryExit)
+	if err != nil {
+		return errors.Wrap(err, "could not sign VoluntaryExit object")
+	}
+
+	msgs := spectypes.PartialSignatureMessages{
+		Type:     spectypes.VoluntaryExitPartialSig,
+		Slot:     duty.Slot,
+		Messages: []*spectypes.PartialSignatureMessage{msg},
+	}
+
+	// sign PartialSignatureMessages object
+	signature, err := r.GetSigner().SignRoot(msgs, spectypes.PartialSignatureType, r.GetShare().SharePubKey)
+	if err != nil {
+		return errors.Wrap(err, "could not sign voluntary exit msg")
+	}
+	signedPartialMsg := &spectypes.SignedPartialSignatureMessage{
+		Message:   msgs,
+		Signature: signature,
+		Signer:    r.GetShare().OperatorID,
+	}
+
+	// broadcast
+	data, err := signedPartialMsg.Encode()
+	if err != nil {
+		return errors.Wrap(err, "failed to encode signedPartialMsg with VoluntaryExit")
+	}
+	msgToBroadcast := &spectypes.SSVMessage{
+		MsgType: spectypes.SSVPartialSignatureMsgType,
+		MsgID:   spectypes.NewMsgID(r.GetShare().DomainType, r.GetShare().ValidatorPubKey, r.BaseRunner.BeaconRoleType),
+		Data:    data,
+	}
+	if err := r.GetNetwork().Broadcast(msgToBroadcast); err != nil {
+		return errors.Wrap(err, "can't broadcast signedPartialMsg with VoluntaryExit")
+	}
+
+	// store the value for later use in ProcessPreConsensus
+	r.voluntaryExit = voluntaryExit
+
+	return nil
+}
+
+// Returns *phase0.VoluntaryExit object with current epoch and own validator index
+func (r *VoluntaryExitRunner) calculateVoluntaryExit() (*phase0.VoluntaryExit, error) {
+	epoch := r.BaseRunner.BeaconNetwork.EstimatedEpochAtSlot(r.BaseRunner.State.StartingDuty.Slot)
+	validatorIndex := r.GetState().StartingDuty.ValidatorIndex
+	return &phase0.VoluntaryExit{
+		Epoch:          epoch,
+		ValidatorIndex: validatorIndex,
+	}, nil
+}
+
+func (r *VoluntaryExitRunner) GetBaseRunner() *BaseRunner {
+	return r.BaseRunner
+}
+
+func (r *VoluntaryExitRunner) GetNetwork() specssv.Network {
+	return r.network
+}
+
+func (r *VoluntaryExitRunner) GetBeaconNode() specssv.BeaconNode {
+	return r.beacon
+}
+
+func (r *VoluntaryExitRunner) GetShare() *spectypes.Share {
+	return r.BaseRunner.Share
+}
+
+func (r *VoluntaryExitRunner) GetState() *State {
+	return r.BaseRunner.State
+}
+
+func (r *VoluntaryExitRunner) GetValCheckF() specqbft.ProposedValueCheckF {
+	return r.valCheck
+}
+
+func (r *VoluntaryExitRunner) GetSigner() spectypes.KeyManager {
+	return r.signer
+}
+
+// Encode returns the encoded struct in bytes or error
+func (r *VoluntaryExitRunner) Encode() ([]byte, error) {
+	return json.Marshal(r)
+}
+
+// Decode returns error if decoding failed
+func (r *VoluntaryExitRunner) Decode(data []byte) error {
+	return json.Unmarshal(data, &r)
+}
+
+// GetRoot returns the root used for signing and verification
+func (r *VoluntaryExitRunner) GetRoot() ([32]byte, error) {
+	marshaledRoot, err := r.Encode()
+	if err != nil {
+		return [32]byte{}, errors.Wrap(err, "could not encode DutyRunnerState")
+	}
+	ret := sha256.Sum256(marshaledRoot)
+	return ret, nil
+}
diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go
b/protocol/v2/ssv/spectest/msg_processing_type.go index 19fd0c71c8..412b92b8da 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -2,6 +2,9 @@ package spectest import ( "encoding/hex" + "path/filepath" + "reflect" + "strings" "testing" specqbft "github.com/bloxapp/ssv-spec/qbft" @@ -9,12 +12,15 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" + "go.uber.org/zap" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" ssvtesting "github.com/bloxapp/ssv/protocol/v2/ssv/testing" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) type MsgProcessingSpecTest struct { @@ -23,6 +29,7 @@ type MsgProcessingSpecTest struct { Duty *spectypes.Duty Messages []*spectypes.SSVMessage PostDutyRunnerStateRoot string + PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json // OutputMessages compares pre/ post signed partial sigs to output. We exclude consensus msgs as it's tested in consensus OutputMessages []*spectypes.SignedPartialSignatureMessage BeaconBroadcastedRoots []string @@ -36,6 +43,13 @@ func (test *MsgProcessingSpecTest) TestName() string { func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { logger := logging.TestLogger(t) + test.overrideStateComparison(t) + test.RunAsPartOfMultiTest(t, logger) +} + +func (test *MsgProcessingSpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { + test.Runner.GetBaseRunner().VerifySignatures = true + v := ssvtesting.BaseValidator(logger, spectestingutils.KeySetForShare(test.Runner.GetBaseRunner().Share)) v.DutyRunners[test.Runner.GetBaseRunner().BeaconRoleType] = test.Runner v.Network = test.Runner.GetNetwork().(specqbft.Network) // TODO need to align @@ -45,7 +59,7 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { lastErr = v.StartDuty(logger, test.Duty) } for _, msg := range test.Messages { - dmsg, err := queue.DecodeSSVMessage(logger, msg) + dmsg, err := queue.DecodeSSVMessage(msg) if err != nil { lastErr = err continue @@ -57,7 +71,7 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { } if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) + require.EqualError(t, lastErr, test.ExpectedError, "expected: %v", test.ExpectedError) } else { require.NoError(t, lastErr) } @@ -143,3 +157,43 @@ func (test *MsgProcessingSpecTest) compareOutputMsgs(t *testing.T, v *validator. 
index++ } } + +func (test *MsgProcessingSpecTest) overrideStateComparison(t *testing.T) { + testType := reflect.TypeOf(test).String() + testType = strings.Replace(testType, "spectest.", "tests.", 1) + overrideStateComparison(t, test, test.Name, testType) +} + +func overrideStateComparison(t *testing.T, test *MsgProcessingSpecTest, name string, testType string) { + var r runner.Runner + switch test.Runner.(type) { + case *runner.AttesterRunner: + r = &runner.AttesterRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case *runner.SyncCommitteeRunner: + r = &runner.SyncCommitteeRunner{} + case *runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + // override + test.PostDutyRunnerState = r + + root, err := r.GetRoot() + require.NoError(t, err) + + test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) +} diff --git a/protocol/v2/ssv/spectest/multi_msg_processing_type.go b/protocol/v2/ssv/spectest/multi_msg_processing_type.go index 0b4b926f6e..4d040782e2 100644 --- a/protocol/v2/ssv/spectest/multi_msg_processing_type.go +++ b/protocol/v2/ssv/spectest/multi_msg_processing_type.go @@ -1,10 +1,20 @@ package spectest -import "testing" +import ( + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/bloxapp/ssv/logging" + "go.uber.org/zap" +) type MultiMsgProcessingSpecTest struct { Name string Tests []*MsgProcessingSpecTest + + logger *zap.Logger } func (tests *MultiMsgProcessingSpecTest) TestName() string { @@ -12,10 +22,23 @@ func (tests *MultiMsgProcessingSpecTest) TestName() string { } func (tests *MultiMsgProcessingSpecTest) Run(t *testing.T) { + tests.logger = logging.TestLogger(t) + tests.overrideStateComparison(t) + for _, test := range tests.Tests { - test := test t.Run(test.TestName(), func(t *testing.T) { - RunMsgProcessing(t, test) + test.RunAsPartOfMultiTest(t, tests.logger) }) } } + +// overrideStateComparison overrides the post state comparison for all tests in the multi test +func (tests *MultiMsgProcessingSpecTest) overrideStateComparison(t *testing.T) { + testsName := strings.ReplaceAll(tests.TestName(), " ", "_") + for _, test := range tests.Tests { + path := filepath.Join(testsName, test.TestName()) + testType := reflect.TypeOf(tests).String() + testType = strings.Replace(testType, "spectest.", "tests.", 1) + overrideStateComparison(t, test, path, testType) + } +} diff --git a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go index c8bf0cae80..cfac13ec9d 100644 --- a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go +++ b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go @@ -2,14 +2,19 @@ package spectest import ( "encoding/hex" + "path/filepath" + "reflect" + "strings" "testing" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/stretchr/testify/require" "go.uber.org/zap" 
"github.com/bloxapp/ssv/protocol/v2/ssv/runner" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) type StartNewRunnerDutySpecTest struct { @@ -17,6 +22,7 @@ type StartNewRunnerDutySpecTest struct { Runner runner.Runner Duty *spectypes.Duty PostDutyRunnerStateRoot string + PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json OutputMessages []*spectypes.SignedPartialSignatureMessage ExpectedError string } @@ -25,7 +31,14 @@ func (test *StartNewRunnerDutySpecTest) TestName() string { return test.Name } -func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { +// overrideStateComparison overrides the state comparison to compare the runner state +func (test *StartNewRunnerDutySpecTest) overrideStateComparison(t *testing.T) { + testType := reflect.TypeOf(test).String() + testType = strings.Replace(testType, "spectest.", "newduty.", 1) + overrideStateComparisonForStartNewRunnerDutySpecTest(t, test, test.Name, testType) +} + +func (test *StartNewRunnerDutySpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { err := test.Runner.StartNewDuty(logger, test.Duty) if len(test.ExpectedError) > 0 { require.EqualError(t, err, test.ExpectedError) @@ -84,6 +97,11 @@ func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { require.EqualValues(t, test.PostDutyRunnerStateRoot, hex.EncodeToString(postRoot[:])) } +func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { + test.overrideStateComparison(t) + test.RunAsPartOfMultiTest(t, logger) +} + type MultiStartNewRunnerDutySpecTest struct { Name string Tests []*StartNewRunnerDutySpecTest @@ -94,10 +112,56 @@ func (tests *MultiStartNewRunnerDutySpecTest) TestName() string { } func (tests *MultiStartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { + tests.overrideStateComparison(t) + for _, test := range tests.Tests { - test := test t.Run(test.TestName(), func(t *testing.T) { - test.Run(t, logger) + test.RunAsPartOfMultiTest(t, logger) }) } } + +// overrideStateComparison overrides the post state comparison for all tests in the multi test +func (tests *MultiStartNewRunnerDutySpecTest) overrideStateComparison(t *testing.T) { + testsName := strings.ReplaceAll(tests.TestName(), " ", "_") + for _, test := range tests.Tests { + path := filepath.Join(testsName, test.TestName()) + testType := reflect.TypeOf(tests).String() + testType = strings.Replace(testType, "spectest.", "newduty.", 1) + overrideStateComparisonForStartNewRunnerDutySpecTest(t, test, path, testType) + } +} + +func overrideStateComparisonForStartNewRunnerDutySpecTest(t *testing.T, test *StartNewRunnerDutySpecTest, name string, testType string) { + var r runner.Runner + switch test.Runner.(type) { + case *runner.AttesterRunner: + r = &runner.AttesterRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case *runner.SyncCommitteeRunner: + r = &runner.SyncCommitteeRunner{} + case *runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + 
// override + test.PostDutyRunnerState = r + + root, err := r.GetRoot() + require.NoError(t, err) + + test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) +} diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go index ccb15f0285..14fac24b35 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -2,7 +2,6 @@ package spectest import ( "encoding/json" - "fmt" "os" "reflect" "strings" @@ -19,7 +18,6 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" @@ -41,105 +39,130 @@ func TestSSVMapping(t *testing.T) { panic(err.Error()) } - origDomain := types.GetDefaultDomain() types.SetDefaultDomain(testingutils.TestingSSVDomainType) - defer func() { - types.SetDefaultDomain(origDomain) - }() for name, test := range untypedTests { name, test := name, test + r := prepareTest(t, logger, name, test) + if r != nil { + t.Run(r.name, func(t *testing.T) { + t.Parallel() + r.test(t) + }) + } + } +} - testName := strings.Split(name, "_")[1] - testType := strings.Split(name, "_")[0] +type runnable struct { + name string + test func(t *testing.T) +} - fmt.Printf("--------- %s - %s \n", testType, testName) +func prepareTest(t *testing.T, logger *zap.Logger, name string, test interface{}) *runnable { + testName := strings.Split(name, "_")[1] + testType := strings.Split(name, "_")[0] - switch testType { - case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &MsgProcessingSpecTest{ - Runner: &runner.AttesterRunner{}, - } - // TODO fix blinded test - if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { - continue - } - require.NoError(t, json.Unmarshal(byts, &typedTest)) + switch testType { + case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &MsgProcessingSpecTest{ + Runner: &runner.AttesterRunner{}, + } + // TODO: fix blinded test + if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { + logger.Info("skipping blinded block test", zap.String("test", testName)) + return nil + } + require.NoError(t, json.Unmarshal(byts, &typedTest)) - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { RunMsgProcessing(t, typedTest) - }) - case reflect.TypeOf(&tests.MultiMsgProcessingSpecTest{}).String(): - subtests := test.(map[string]interface{})["Tests"].([]interface{}) - typedTests := make([]*MsgProcessingSpecTest, 0) - for _, subtest := range subtests { - typedTests = append(typedTests, msgProcessingSpecTestFromMap(t, subtest.(map[string]interface{}))) - } - - typedTest := &MultiMsgProcessingSpecTest{ - Name: test.(map[string]interface{})["Name"].(string), - Tests: typedTests, - } + }, + } + case reflect.TypeOf(&tests.MultiMsgProcessingSpecTest{}).String(): + typedTest := &MultiMsgProcessingSpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + } + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + for _, subtest := range subtests { + typedTest.Tests = append(typedTest.Tests, msgProcessingSpecTestFromMap(t, 
subtest.(map[string]interface{}))) + } - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&messages.MsgSpecTest{}).String(): // no use of internal structs so can run as spec test runs - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &messages.MsgSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&messages.MsgSpecTest{}).String(): // no use of internal structs so can run as spec test runs + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &messages.MsgSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&valcheck.SpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &valcheck.SpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&valcheck.SpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &valcheck.SpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&valcheck.MultiSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &valcheck.MultiSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&valcheck.MultiSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &valcheck.MultiSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { RunSyncCommitteeAggProof(t, typedTest) - }) - case reflect.TypeOf(&newduty.MultiStartNewRunnerDutySpecTest{}).String(): - subtests := test.(map[string]interface{})["Tests"].([]interface{}) - typedTests := 
make([]*StartNewRunnerDutySpecTest, 0) - for _, subtest := range subtests { - typedTests = append(typedTests, newRunnerDutySpecTestFromMap(t, subtest.(map[string]interface{}))) - } - - typedTest := &MultiStartNewRunnerDutySpecTest{ - Name: test.(map[string]interface{})["Name"].(string), - Tests: typedTests, - } + }, + } + case reflect.TypeOf(&newduty.MultiStartNewRunnerDutySpecTest{}).String(): + typedTest := &MultiStartNewRunnerDutySpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + } - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + for _, subtest := range subtests { + typedTest.Tests = append(typedTest.Tests, newRunnerDutySpecTestFromMap(t, subtest.(map[string]interface{}))) + } typedTest.Run(t, logger) - }) - default: - t.Fatalf("unsupported test type %s [%s]", testType, testName) + }, } + default: + t.Fatalf("unsupported test type %s [%s]", testType, testName) + return nil } } @@ -324,6 +347,10 @@ func baseRunnerForRole(logger *zap.Logger, role spectypes.BeaconRole, base *runn ret := ssvtesting.ValidatorRegistrationRunner(logger, ks) ret.(*runner.ValidatorRegistrationRunner).BaseRunner = base return ret + case spectypes.BNRoleVoluntaryExit: + ret := ssvtesting.VoluntaryExitRunner(logger, ks) + ret.(*runner.VoluntaryExitRunner).BaseRunner = base + return ret case testingutils.UnknownDutyType: ret := ssvtesting.UnknownDutyTypeRunner(logger, ks) ret.(*runner.AttesterRunner).BaseRunner = base diff --git a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go index 9e12cab157..2fd4091732 100644 --- a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go +++ b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go @@ -24,7 +24,7 @@ func RunSyncCommitteeAggProof(t *testing.T, test *synccommitteeaggregator.SyncCo lastErr := v.StartDuty(logger, &testingutils.TestingSyncCommitteeContributionDuty) for _, msg := range test.Messages { - dmsg, err := queue.DecodeSSVMessage(logger, msg) + dmsg, err := queue.DecodeSSVMessage(msg) if err != nil { lastErr = err continue diff --git a/protocol/v2/ssv/testing/runner.go b/protocol/v2/ssv/testing/runner.go index 2d8fcc8095..7689d10073 100644 --- a/protocol/v2/ssv/testing/runner.go +++ b/protocol/v2/ssv/testing/runner.go @@ -23,14 +23,14 @@ var AttesterRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySe //} var ProposerRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { - return baseRunner(logger, spectypes.BNRoleProposer, specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil, true), keySet) + return baseRunner(logger, spectypes.BNRoleProposer, specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil), keySet) } var ProposerBlindedBlockRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { ret := baseRunner( logger, spectypes.BNRoleProposer, - specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil, true), + 
specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil), keySet, ) ret.(*runner.ProposerRunner).ProducesBlindedBlocks = true @@ -54,6 +54,10 @@ var ValidatorRegistrationRunner = func(logger *zap.Logger, keySet *spectestingut return ret } +var VoluntaryExitRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { + return baseRunner(logger, spectypes.BNRoleVoluntaryExit, nil, keySet) +} + var UnknownDutyTypeRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { return baseRunner(logger, spectestingutils.UnknownDutyType, spectestingutils.UnknownDutyValueCheck(), keySet) } @@ -144,6 +148,14 @@ var baseRunner = func(logger *zap.Logger, role spectypes.BeaconRole, valCheck sp net, km, ) + case spectypes.BNRoleVoluntaryExit: + return runner.NewVoluntaryExitRunner( + spectypes.BeaconTestNetwork, + share, + spectestingutils.NewTestingBeaconNode(), + net, + km, + ) case spectestingutils.UnknownDutyType: ret := runner.NewAttesterRunnner( spectypes.BeaconTestNetwork, diff --git a/protocol/v2/ssv/testing/validator.go b/protocol/v2/ssv/testing/validator.go index 844145bd8c..d006111c2b 100644 --- a/protocol/v2/ssv/testing/validator.go +++ b/protocol/v2/ssv/testing/validator.go @@ -7,6 +7,7 @@ import ( spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "go.uber.org/zap" + "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/protocol/v2/qbft/testing" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" @@ -22,7 +23,7 @@ var BaseValidator = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet validator.Options{ Network: spectestingutils.NewTestingNetwork(), Beacon: spectestingutils.NewTestingBeaconNode(), - BeaconNetwork: spectypes.BeaconTestNetwork, + BeaconNetwork: networkconfig.TestNetwork.Beacon, Storage: testing.TestingStores(logger), SSVShare: &types.SSVShare{ Share: *spectestingutils.TestingShare(keySet), diff --git a/protocol/v2/ssv/validator/metrics.go b/protocol/v2/ssv/validator/metrics.go new file mode 100644 index 0000000000..ce1840736b --- /dev/null +++ b/protocol/v2/ssv/validator/metrics.go @@ -0,0 +1,45 @@ +package validator + +import ( + "time" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" +) + +type Metrics interface { + ValidatorInactive(publicKey []byte) + ValidatorNoIndex(publicKey []byte) + ValidatorError(publicKey []byte) + ValidatorReady(publicKey []byte) + ValidatorNotActivated(publicKey []byte) + ValidatorExiting(publicKey []byte) + ValidatorSlashed(publicKey []byte) + ValidatorNotFound(publicKey []byte) + ValidatorPending(publicKey []byte) + ValidatorRemoved(publicKey []byte) + ValidatorUnknown(publicKey []byte) + + queue.Metrics +} + +type NopMetrics struct{} + +func (n NopMetrics) ValidatorInactive([]byte) {} +func (n NopMetrics) ValidatorNoIndex([]byte) {} +func (n NopMetrics) ValidatorError([]byte) {} +func (n NopMetrics) ValidatorReady([]byte) {} +func (n NopMetrics) ValidatorNotActivated([]byte) {} +func (n NopMetrics) ValidatorExiting([]byte) {} +func (n NopMetrics) ValidatorSlashed([]byte) {} +func (n NopMetrics) ValidatorNotFound([]byte) {} +func (n NopMetrics) ValidatorPending([]byte) {} +func (n NopMetrics) ValidatorRemoved([]byte) {} +func (n NopMetrics) ValidatorUnknown([]byte) {} +func (n NopMetrics) IncomingQueueMessage(spectypes.MessageID) {} 
+func (n NopMetrics) OutgoingQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) DroppedQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) MessageQueueSize(int) {} +func (n NopMetrics) MessageQueueCapacity(int) {} +func (n NopMetrics) MessageTimeInQueue(spectypes.MessageID, time.Duration) {} diff --git a/protocol/v2/ssv/validator/msgqueue_consumer.go b/protocol/v2/ssv/validator/msgqueue_consumer.go index 7ba5efb119..ba82efa396 100644 --- a/protocol/v2/ssv/validator/msgqueue_consumer.go +++ b/protocol/v2/ssv/validator/msgqueue_consumer.go @@ -28,7 +28,8 @@ type queueContainer struct { // HandleMessage handles a spectypes.SSVMessage. // TODO: accept DecodedSSVMessage once p2p is upgraded to decode messages during validation. -func (v *Validator) HandleMessage(logger *zap.Logger, msg *spectypes.SSVMessage) { +// TODO: get rid of logger, add context +func (v *Validator) HandleMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) { v.mtx.RLock() // read v.Queues defer v.mtx.RUnlock() @@ -37,22 +38,13 @@ func (v *Validator) HandleMessage(logger *zap.Logger, msg *spectypes.SSVMessage) // fields.Role(msg.MsgID.GetRoleType())) if q, ok := v.Queues[msg.MsgID.GetRoleType()]; ok { - decodedMsg, err := queue.DecodeSSVMessage(logger, msg) - if err != nil { - logger.Warn("❗ failed to decode message", - zap.Error(err), - zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), - zap.String("msg_id", msg.MsgID.String()), - ) - return - } - if pushed := q.Q.TryPush(decodedMsg); !pushed { + if pushed := q.Q.TryPush(msg); !pushed { msgID := msg.MsgID.String() logger.Warn("❗ dropping message because the queue is full", zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), zap.String("msg_id", msgID)) } - // logger.Debug("📬 queue: pushed message", fields.MessageID(decodedMsg.MsgID), fields.MessageType(decodedMsg.MsgType)) + // logger.Debug("📬 queue: pushed message", fields.MessageID(msg.MsgID), fields.MessageType(msg.MsgType)) } else { logger.Error("❌ missing queue for role type", fields.Role(msg.MsgID.GetRoleType())) } diff --git a/protocol/v2/ssv/validator/non_committee_validator.go b/protocol/v2/ssv/validator/non_committee_validator.go index 3d03a44d4e..e1bcf47df7 100644 --- a/protocol/v2/ssv/validator/non_committee_validator.go +++ b/protocol/v2/ssv/validator/non_committee_validator.go @@ -9,6 +9,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/qbft" qbftcontroller "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -21,9 +22,10 @@ type NonCommitteeValidator struct { func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID, opts Options) *NonCommitteeValidator { // currently, only need domain & storage config := &qbft.Config{ - Domain: types.GetDefaultDomain(), - Storage: opts.Storage.Get(identifier.GetRoleType()), - Network: opts.Network, + Domain: types.GetDefaultDomain(), + Storage: opts.Storage.Get(identifier.GetRoleType()), + Network: opts.Network, + SignatureVerification: opts.VerifySignatures, } ctrl := qbftcontroller.NewController(identifier[:], &opts.SSVShare.Share, types.GetDefaultDomain(), config, opts.FullNode) ctrl.StoredInstances = make(qbftcontroller.InstanceContainer, 0, nonCommitteeInstanceContainerCapacity(opts.FullNode)) @@ -39,7 +41,7 @@ func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID } } -func (ncv *NonCommitteeValidator) ProcessMessage(logger *zap.Logger, msg 
*spectypes.SSVMessage) { +func (ncv *NonCommitteeValidator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) { logger = logger.With(fields.PubKey(msg.MsgID.GetPubKey()), fields.Role(msg.MsgID.GetRoleType())) if err := validateMessage(ncv.Share.Share, msg); err != nil { diff --git a/protocol/v2/ssv/validator/opts.go b/protocol/v2/ssv/validator/opts.go index e1085dead6..9c2e0d81a7 100644 --- a/protocol/v2/ssv/validator/opts.go +++ b/protocol/v2/ssv/validator/opts.go @@ -6,6 +6,8 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv/ibft/storage" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" qbftctrl "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/types" @@ -19,7 +21,7 @@ const ( type Options struct { Network specqbft.Network Beacon specssv.BeaconNode - BeaconNetwork spectypes.BeaconNetwork + BeaconNetwork beacon.BeaconNetwork Storage *storage.QBFTStores SSVShare *types.SSVShare Signer spectypes.KeyManager @@ -30,6 +32,9 @@ type Options struct { BuilderProposals bool QueueSize int GasLimit uint64 + MessageValidator validation.MessageValidator + Metrics Metrics + VerifySignatures bool } func (o *Options) defaults() { diff --git a/protocol/v2/ssv/validator/startup.go b/protocol/v2/ssv/validator/startup.go index 4ca2c8acea..b316e8c9f2 100644 --- a/protocol/v2/ssv/validator/startup.go +++ b/protocol/v2/ssv/validator/startup.go @@ -1,9 +1,7 @@ package validator import ( - "context" "sync/atomic" - "time" "github.com/bloxapp/ssv-spec/p2p" spectypes "github.com/bloxapp/ssv-spec/types" @@ -56,7 +54,6 @@ func (v *Validator) Start(logger *zap.Logger) (started bool, err error) { return true, err } go v.StartQueueConsumer(logger, identifier, v.ProcessMessage) - go v.sync(logger, identifier) } return true, nil } @@ -73,27 +70,3 @@ func (v *Validator) Stop() { v.Queues = make(map[spectypes.BeaconRole]queueContainer) } } - -// sync performs highest decided sync -func (v *Validator) sync(logger *zap.Logger, mid spectypes.MessageID) { - ctx, cancel := context.WithCancel(v.ctx) - defer cancel() - - // TODO: config? 
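// The whole sync func below is deleted together with its only caller, the
// go v.sync(logger, identifier) removed from Start above; the server-side
// handlers for these requests (last_decided.go, decided_history.go) are
// deleted later in this diff as well.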
- interval := time.Second - retries := 3 - - for ctx.Err() == nil { - err := v.Network.SyncHighestDecided(mid) - if err != nil { - logger.Debug("❌ failed to sync highest decided", zap.Error(err)) - retries-- - if retries > 0 { - interval *= 2 - time.Sleep(interval) - continue - } - } - return - } -} diff --git a/protocol/v2/ssv/validator/timer.go b/protocol/v2/ssv/validator/timer.go index 87013bd5dd..6b819b992b 100644 --- a/protocol/v2/ssv/validator/timer.go +++ b/protocol/v2/ssv/validator/timer.go @@ -10,12 +10,13 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/types" ) -func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) func() { - return func() { +func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) roundtimer.OnRoundTimeoutF { + return func(round specqbft.Round) { v.mtx.RLock() // read-lock for v.Queues, v.state defer v.mtx.RUnlock() @@ -30,12 +31,12 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID return } - msg, err := v.createTimerMessage(identifier, height) + msg, err := v.createTimerMessage(identifier, height, round) if err != nil { logger.Debug("❗ failed to create timer msg", zap.Error(err)) return } - dec, err := queue.DecodeSSVMessage(logger, msg) + dec, err := queue.DecodeSSVMessage(msg) if err != nil { logger.Debug("❌ failed to decode timer msg", zap.Error(err)) return @@ -49,8 +50,11 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID } } -func (v *Validator) createTimerMessage(identifier spectypes.MessageID, height specqbft.Height) (*spectypes.SSVMessage, error) { - td := types.TimeoutData{Height: height} +func (v *Validator) createTimerMessage(identifier spectypes.MessageID, height specqbft.Height, round specqbft.Round) (*spectypes.SSVMessage, error) { + td := types.TimeoutData{ + Height: height, + Round: round, + } data, err := json.Marshal(td) if err != nil { return nil, errors.Wrap(err, "failed to marshal timeout data") diff --git a/protocol/v2/ssv/validator/validator.go b/protocol/v2/ssv/validator/validator.go index 7f1dd80d2e..0fa54de66a 100644 --- a/protocol/v2/ssv/validator/validator.go +++ b/protocol/v2/ssv/validator/validator.go @@ -13,6 +13,7 @@ import ( "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" @@ -39,24 +40,31 @@ type Validator struct { dutyIDs *hashmap.Map[spectypes.BeaconRole, string] state uint32 + + messageValidator validation.MessageValidator } // NewValidator creates a new instance of Validator. 
func NewValidator(pctx context.Context, cancel func(), options Options) *Validator { options.defaults() + if options.Metrics == nil { + options.Metrics = &NopMetrics{} + } + v := &Validator{ - mtx: &sync.RWMutex{}, - ctx: pctx, - cancel: cancel, - DutyRunners: options.DutyRunners, - Network: options.Network, - Storage: options.Storage, - Share: options.SSVShare, - Signer: options.Signer, - Queues: make(map[spectypes.BeaconRole]queueContainer), - state: uint32(NotStarted), - dutyIDs: hashmap.New[spectypes.BeaconRole, string](), + mtx: &sync.RWMutex{}, + ctx: pctx, + cancel: cancel, + DutyRunners: options.DutyRunners, + Network: options.Network, + Storage: options.Storage, + Share: options.SSVShare, + Signer: options.Signer, + Queues: make(map[spectypes.BeaconRole]queueContainer), + state: uint32(NotStarted), + dutyIDs: hashmap.New[spectypes.BeaconRole, string](), + messageValidator: options.MessageValidator, } for _, dutyRunner := range options.DutyRunners { @@ -65,10 +73,9 @@ func NewValidator(pctx context.Context, cancel func(), options Options) *Validat // Setup the queue. role := dutyRunner.GetBaseRunner().BeaconRoleType - msgID := spectypes.NewMsgID(types.GetDefaultDomain(), options.SSVShare.ValidatorPubKey, role).String() v.Queues[role] = queueContainer{ - Q: queue.WithMetrics(queue.New(options.QueueSize), queue.NewPrometheusMetrics(msgID)), + Q: queue.WithMetrics(queue.New(options.QueueSize), options.Metrics), queueState: &queue.State{ HasRunningInstance: false, Height: 0, @@ -111,7 +118,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess return fmt.Errorf("could not get duty runner for msg ID %v", messageID) } - if err := validateMessage(v.Share.Share, msg.SSVMessage); err != nil { + if err := validateMessage(v.Share.Share, msg); err != nil { return fmt.Errorf("message invalid for msg ID %v: %w", messageID, err) } @@ -143,7 +150,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess } } -func validateMessage(share spectypes.Share, msg *spectypes.SSVMessage) error { +func validateMessage(share spectypes.Share, msg *queue.DecodedSSVMessage) error { if !share.ValidatorPubKey.MessageIDBelongs(msg.GetID()) { return errors.New("msg ID doesn't match validator ID") } diff --git a/protocol/v2/sync/handlers/decided_history.go b/protocol/v2/sync/handlers/decided_history.go deleted file mode 100644 index 3dc960cfcb..0000000000 --- a/protocol/v2/sync/handlers/decided_history.go +++ /dev/null @@ -1,57 +0,0 @@ -package handlers - -import ( - "fmt" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/ibft/storage" - "github.com/bloxapp/ssv/protocol/v2/message" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" -) - -// HistoryHandler handler for decided history protocol -// TODO: add msg validation and report scores -func HistoryHandler(logger *zap.Logger, storeMap *storage.QBFTStores, reporting protocolp2p.ValidationReporting, maxBatchSize int) protocolp2p.RequestHandler { - return func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { - logger := logger.With(zap.String("msg_id", fmt.Sprintf("%x", msg.MsgID))) - sm := &message.SyncMessage{} - err := sm.Decode(msg.Data) - if err != nil { - logger.Debug("❌ failed to decode message data", zap.Error(err)) - reporting.ReportValidation(logger, msg, protocolp2p.ValidationRejectLow) - sm.Status = message.StatusBadRequest - } else if sm.Protocol != 
message.DecidedHistoryType { - // not this protocol - // TODO: remove after v0 - return nil, nil - } else { - items := int(sm.Params.Height[1] - sm.Params.Height[0]) - if items > maxBatchSize { - sm.Params.Height[1] = sm.Params.Height[0] + specqbft.Height(maxBatchSize) - } - msgID := msg.GetID() - store := storeMap.Get(msgID.GetRoleType()) - if store == nil { - return nil, errors.New(fmt.Sprintf("not storage found for type %s", msgID.GetRoleType().String())) - } - instances, err := store.GetInstancesInRange(msgID[:], sm.Params.Height[0], sm.Params.Height[1]) - results := make([]*specqbft.SignedMessage, 0, len(instances)) - for _, instance := range instances { - results = append(results, instance.DecidedMessage) - } - sm.UpdateResults(err, results...) - } - - data, err := sm.Encode() - if err != nil { - return nil, errors.Wrap(err, "could not encode result data") - } - msg.Data = data - - return msg, nil - } -} diff --git a/protocol/v2/sync/handlers/last_decided.go b/protocol/v2/sync/handlers/last_decided.go deleted file mode 100644 index 6b33579b0f..0000000000 --- a/protocol/v2/sync/handlers/last_decided.go +++ /dev/null @@ -1,53 +0,0 @@ -package handlers - -import ( - "fmt" - - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/ibft/storage" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/protocol/v2/message" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" -) - -// LastDecidedHandler handler for last-decided protocol -// TODO: add msg validation and report scores -func LastDecidedHandler(plogger *zap.Logger, storeMap *storage.QBFTStores, reporting protocolp2p.ValidationReporting) protocolp2p.RequestHandler { - return func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { - logger := plogger.With(fields.PubKey(msg.MsgID.GetPubKey())) - sm := &message.SyncMessage{} - err := sm.Decode(msg.Data) - if err != nil { - logger.Debug("❌ failed to decode message data", zap.Error(err)) - reporting.ReportValidation(logger, msg, protocolp2p.ValidationRejectLow) - sm.Status = message.StatusBadRequest - } else if sm.Protocol != message.LastDecidedType { - // not this protocol - // TODO: remove after v0 - return nil, nil - } else { - msgID := msg.GetID() - store := storeMap.Get(msgID.GetRoleType()) - if store == nil { - return nil, errors.New(fmt.Sprintf("not storage found for type %s", msgID.GetRoleType().String())) - } - instance, err := store.GetHighestInstance(msgID[:]) - if err != nil { - logger.Debug("❗ failed to get highest instance", zap.Error(err)) - } else if instance != nil { - sm.UpdateResults(err, instance.DecidedMessage) - } - } - - data, err := sm.Encode() - if err != nil { - return nil, errors.Wrap(err, "could not encode result data") - } - msg.Data = data - - return msg, nil - } -} diff --git a/protocol/v2/testing/test_utils.go b/protocol/v2/testing/test_utils.go index 2b2f79e4c1..7994e60361 100644 --- a/protocol/v2/testing/test_utils.go +++ b/protocol/v2/testing/test_utils.go @@ -1,6 +1,7 @@ package testing import ( + "fmt" "os" "path" "path/filepath" @@ -145,9 +146,25 @@ func AggregateInvalidSign(t *testing.T, sks map[spectypes.OperatorID]*bls.Secret } func GetSpecTestJSON(path string, module string) ([]byte, error) { + p, err := GetSpecDir(path, module) + if err != nil { + return nil, fmt.Errorf("could not get spec test dir: %w", err) + } + return os.ReadFile(filepath.Join(filepath.Clean(p), filepath.Clean(specTestPath))) +} + +// GetSpecDir returns the path to the ssv-spec module. 
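Editor's note: the test_utils.go refactor above splits path resolution out of GetSpecTestJSON into GetSpecDir, so tooling can locate the ssv-spec module on disk without also reading a spec-test file. A hedged usage sketch — the empty-path fallback to os.Getwd and the go.mod/replace resolution follow the code above, but the "ssv-spec" argument is illustrative, not necessarily what callers in the repo pass:

```go
package main

import (
	"fmt"
	"log"

	protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing"
)

func main() {
	// An empty path falls back to os.Getwd inside GetSpecDir, which
	// then resolves the ssv-spec module via go.mod (honoring any
	// replace directive). The second argument is appended to the
	// resolved module directory.
	dir, err := protocoltesting.GetSpecDir("", "ssv-spec")
	if err != nil {
		log.Fatalf("could not locate spec module: %v", err)
	}
	fmt.Println("spec module at:", dir)
}
```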
+func GetSpecDir(path, module string) (string, error) { + if path == "" { + var err error + path, err = os.Getwd() + if err != nil { + return "", errors.New("could not get current directory") + } + } goModFile, err := getGoModFile(path) if err != nil { - return nil, errors.New("could not get go.mod file") + return "", errors.New("could not get go.mod file") } // check if there is a replace @@ -173,7 +190,7 @@ func GetSpecTestJSON(path string, module string) ([]byte, error) { } } if req == nil { - return nil, errors.Errorf("could not find %s module", specModule) + return "", errors.Errorf("could not find %s module", specModule) } modPath = req.Mod.Path modVersion = req.Mod.Version @@ -182,14 +199,14 @@ func GetSpecTestJSON(path string, module string) ([]byte, error) { // get module path p, err := GetModulePath(modPath, modVersion) if err != nil { - return nil, errors.Wrap(err, "could not get module path") + return "", errors.Wrap(err, "could not get module path") } if _, err := os.Stat(p); os.IsNotExist(err) { - return nil, errors.Wrapf(err, "you don't have this module-%s/version-%s installed", modPath, modVersion) + return "", errors.Wrapf(err, "you don't have this module-%s/version-%s installed", modPath, modVersion) } - return os.ReadFile(filepath.Join(filepath.Clean(p), filepath.Clean(module), filepath.Clean(specTestPath))) + return filepath.Join(filepath.Clean(p), module), nil } func GetModulePath(name, version string) (string, error) { diff --git a/protocol/v2/types/bls.go b/protocol/v2/types/bls.go index 70d2b7cb0e..d4e2b39fb9 100644 --- a/protocol/v2/types/bls.go +++ b/protocol/v2/types/bls.go @@ -9,7 +9,7 @@ var blsPublicKeyCache *lru.Cache[string, bls.PublicKey] func init() { var err error - blsPublicKeyCache, err = lru.New[string, bls.PublicKey](10_000) + blsPublicKeyCache, err = lru.New[string, bls.PublicKey](128_000) if err != nil { panic(err) } diff --git a/protocol/v2/types/crypto.go b/protocol/v2/types/crypto.go index 24863a64cc..3f08b7ee5b 100644 --- a/protocol/v2/types/crypto.go +++ b/protocol/v2/types/crypto.go @@ -15,13 +15,11 @@ import ( // // TODO: rethink this function and consider moving/refactoring it. 
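Editor's note: the bls.go hunk above grows the LRU of deserialized BLS public keys from 10,000 to 128,000 entries. A dependency-light sketch of the cache-on-deserialize pattern that cache serves, with plain strings standing in for bls.PublicKey — only the hashicorp/golang-lru/v2 calls match the real code:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// Capacity mirrors the new value above; entries are keyed by the
	// serialized key bytes so repeated signers hit the cache.
	cache, err := lru.New[string, string](128_000)
	if err != nil {
		panic(err)
	}

	// Stand-in for the expensive bls.PublicKey.Deserialize call.
	deserialize := func(raw string) string { return "pk(" + raw + ")" }

	getOrDeserialize := func(raw string) string {
		if pk, ok := cache.Get(raw); ok {
			return pk // cache hit: deserialization skipped
		}
		pk := deserialize(raw)
		cache.Add(raw, pk)
		return pk
	}

	fmt.Println(getOrDeserialize("0xabc")) // miss: deserializes and caches
	fmt.Println(getOrDeserialize("0xabc")) // hit: served from cache
}
```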
func VerifyByOperators(s spectypes.Signature, data spectypes.MessageSignature, domain spectypes.DomainType, sigType spectypes.SignatureType, operators []*spectypes.Operator) error { - // decode sig sign := &bls.Sign{} if err := sign.Deserialize(s); err != nil { return errors.Wrap(err, "failed to deserialize signature") } - // find operators pks := make([]bls.PublicKey, 0) for _, id := range data.GetSigners() { found := false @@ -41,13 +39,11 @@ func VerifyByOperators(s spectypes.Signature, data spectypes.MessageSignature, d } } - // compute root computedRoot, err := spectypes.ComputeSigningRoot(data, spectypes.ComputeSignatureDomain(domain, sigType)) if err != nil { return errors.Wrap(err, "could not compute signing root") } - // verify if res := sign.FastAggregateVerify(pks, computedRoot[:]); !res { return errors.New("failed to verify signature") } @@ -72,7 +68,6 @@ func VerifyReconstructedSignature(sig *bls.Sign, validatorPubKey []byte, root [3 return errors.Wrap(err, "could not deserialize validator pk") } - // verify reconstructed sig if res := sig.VerifyByte(&pk, root[:]); !res { return errors.New("could not reconstruct a valid signature") } diff --git a/protocol/v2/types/messages.go b/protocol/v2/types/messages.go index 121194142d..529b2ab821 100644 --- a/protocol/v2/types/messages.go +++ b/protocol/v2/types/messages.go @@ -34,6 +34,7 @@ type EventMsg struct { type TimeoutData struct { Height qbft.Height + Round qbft.Round } type ExecuteDutyData struct { @@ -57,11 +58,11 @@ func (m *EventMsg) GetExecuteDutyData() (*ExecuteDutyData, error) { } // Encode returns a msg encoded bytes or error -func (msg *EventMsg) Encode() ([]byte, error) { - return json.Marshal(msg) +func (m *EventMsg) Encode() ([]byte, error) { + return json.Marshal(m) } // Decode returns error if decoding failed -func (msg *EventMsg) Decode(data []byte) error { - return json.Unmarshal(data, &msg) +func (m *EventMsg) Decode(data []byte) error { + return json.Unmarshal(data, &m) } diff --git a/registry/storage/shares.go b/registry/storage/shares.go index 17572f0257..321bcd15c8 100644 --- a/registry/storage/shares.go +++ b/registry/storage/shares.go @@ -206,6 +206,13 @@ func ByActiveValidator() SharesFilter { } } +// ByAttesting filters for attesting validators. +func ByAttesting() SharesFilter { + return func(share *types.SSVShare) bool { + return share.HasBeaconMetadata() && share.BeaconMetadata.IsAttesting() + } +} + // ByClusterID filters by cluster id. 
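Editor's note: the new ByAttesting filter above follows the existing SharesFilter convention — each filter is a predicate over a share, and callers chain them when querying storage. A simplified sketch with a local Share type and a hypothetical And combinator (neither exists in the repo under these names; the real SharesFilter is func(*types.SSVShare) bool):

```go
package main

import "fmt"

// Simplified stand-in for types.SSVShare.
type Share struct {
	Active    bool
	Attesting bool
}

type SharesFilter func(*Share) bool

func ByActive() SharesFilter    { return func(s *Share) bool { return s.Active } }
func ByAttesting() SharesFilter { return func(s *Share) bool { return s.Attesting } }

// And is a hypothetical combinator: a share passes only if every
// filter in the chain accepts it.
func And(filters ...SharesFilter) SharesFilter {
	return func(s *Share) bool {
		for _, f := range filters {
			if !f(s) {
				return false
			}
		}
		return true
	}
}

func main() {
	shares := []*Share{
		{Active: true, Attesting: true},
		{Active: true, Attesting: false},
	}
	filter := And(ByActive(), ByAttesting())
	for _, s := range shares {
		fmt.Println(filter(s)) // true, then false
	}
}
```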
func ByClusterID(clusterID []byte) SharesFilter { return func(share *types.SSVShare) bool { diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index 641ad31360..2440971fe0 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -8,7 +8,11 @@ ApprovedChanges: ["256a3dc0f1eb7abf","22b66e9a63ba145b","12c1c3a1622fb7cc","1c44 "db32f358b6e8e2bb","f372e174e1f34c3b","bc47b3d202e8cd0d","86a6abca1a1c16d6","1655d21d5a4cad4","ac4e427097fc5533","6b4d5a114f8066ff", "9482fb9b6a953c48","5778a05e0976a6eb","24e2c7f54d5dd1d","2a8937e50d20faa9","587c629a67ef07ed","9d06d8e0ee4e1113","e624ec802068e711", "943be3ce709a99d3","5b3bb2d2262fe8be","c20c4c7ed8d1711d","b10c6fc7dd9eee7","c121cdaab6c1c698","e12b17f3910be26b","e47bf52e962c90af", - "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f"] + "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f", + "ae1b53fc580ce346","c117bd5db3eeabd6","d06552d71b9ca4cd","4cb333a88af66575","2a580187c312c79a","bf8cf93c55c1eadb","6d877e24991465e4", + "b1c8e0148a4a755","2c25abb7c776bd54","a1754e08473bd1fa","4dbab14670fa155d","2a3667a499a23b16","930379d323dd95e8","65efe31656e8814f", + "1270cef2e573f846","aeafb38ca9114f12","2a83e3384b45f2d7","91fbb874b3ce2570","74ad51ca63526e1e","defd8406641d53a5"] + IgnoredIdentifiers: - logger ReducedPackageNames: diff --git a/utils/rsaencryption/testingspace/vars.go b/utils/rsaencryption/testingspace/vars.go index 27a90cc0de..f94a8da859 100644 --- a/utils/rsaencryption/testingspace/vars.go +++ b/utils/rsaencryption/testingspace/vars.go @@ -2,6 +2,7 @@ package testing var ( // SkPem is a operator private key + // #nosec G101 (Potential hardcoded credentials: RSA private key) SkPem = "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpQIBAAKCAQEAowE7OEbwyLkvrZ0TU4jjooyIFxNvgrY8Fj+WslyZTlyj8UDf\nFrYh5Un2u4YMdAe+cPf1XK+A/P9XX7OB4nf1OoGVB6wrC/jhLbvOH650ryUYopeY\nhlSXxGnD4vcvTvcqLLB+ue2/iySxQLpZR/6VsT3fFrEonzFTqnFCwCF28iPnJVBj\nX6T/HcTJ55IDkbtotarU6cwwNOHnHkzWrv7ityPkR4Ge11hmVG9QjROt56ehXfFs\nFo5MqSvqpYplXkI/zUNm8j/lqEdU0RXUr41L2hyKY/pVjsgmeTsN7/ZqACkHye9F\nbkV9V/VbTh7hWVLTqGSh7BY/D7gwOwfuKiq2TwIDAQABAoIBADjO3Qyn7JKHt44S\nCAI82thzkZo5M8uiJx652pMeom8k6h3SNe18XCPEuzBvbzeg20YTpHdA0vtZIeJA\ndSuwEs7pCj86SWZKvm9p3FQ+QHwpuYQwwP9Py/Svx4z6CIrEqPYaLJAvw2mCyCN+\nzk7A8vpqTa1i4H1ae4YTIuhCwWlxe1ttD6rVUYfC2rVaFJ+b8JlzFRq4bnAR8yme\nrE4iAlfgTOj9zL814qRlYQeeZhMvA8T0qWUohbr1imo5XzIJZayLocvqhZEbk0dj\nq9qKWdIpAATRjWvb+7PkjmlwNjLOhJ1phtCkc/S4j2cvo9gcS7WafxaqCl/ix4Yt\n5KvPJ8ECgYEA0Em4nMMEFXbuSM/l5UCzv3kT6H/TYO7FVh071G7QAFoloxJBZDFV\n7fHsc+uCimlG2Xt3CrGo9tsOnF/ZgDKNmtDvvjxmlPnAb5g4uhXgYNMsKQShpeRW\n/ay8CmWbsRqXZaLoI5br2kCTLwsVz2hpabAzBOr2YV3vMRB5i7COYSMCgYEAyFgL\n3DkKwsTTyVyplenoAZaS/o0mKxZnffRnHNP5QgRfT4pQkuogk+MYAeBuGsc4cTi7\nrTtytUMBABXEKGIJkAbNoASHQMUcO1vvcwhBW7Ay+oxuc0JSlnaXjowS0C0o/4qr\nQ/rpUneir+Vu/N8+6edETRkNj+5unmePEe9NBuUCgYEAgtUr31woHot8FcRxNdW0\nkpstRCe20PZqgjMOt9t7UB1P8uSuqo7K2RHTYuUWNHb4h/ejyNXbumPTA6q5Zmta\nw1pmnWo3TXCrze0iBNFlBazf2kwMdbW+Zs2vuCAm8dIwMylnA6PzNj7FtRETfBqr\nzDVfdsFYTcTBUGJ21qXqaV0CgYEAmuMPEEv9WMTo43VDGsaCeq/Zpvii+I7SphsM\nmMn8m6Bbu1e4oUxmsU7RoanMFeHNbiMpXW1namGJ5XHufDYHJJVN5Zd6pYV+JRoX\njjxkoyke0Hs/bNZqmS7ITwlWBiHT33Rqohzaw8oAObLMUq2ZqyYDtQNYa90vIkH3\n5yq1x00CgYEAs4ztQhGRbeUlqnW6Z6yfRJ6XXYqdMPhxuBxvNn/dxJ10T4W2DUuC\njSdpGXrY+ECYyXUwlXBqbaKx1K5AQD7nmu9J3l0oMkX6tSBj1OE5MabATrsW6wvT\nhkTPJZMyPUYhoBkivPUKyQXswrQV/nUQAsAcLeJShTW4gSs0M6weQAc=\n-----END RSA PRIVATE KEY-----\n" // EncryptedKeyBase64 SkPem in base64 format EncryptedKeyBase64 = "NW/6N5Ubo5T+oiT9My2wXFH5TWT7iQnN8YKUlcoFeg00OzL1S4yKrIPemdr7SM3EbPeHlBtOAM3z+06EmaNlwVdBiexSRJmgnknqwt/Ught4pKZK/WdJAEhMRwjZ3nx1Qi1TYcw7oZBaOdeTdm65QEAnsqOHk1htnUTXqsqYxVF750u8JWq3Mzr3oCN65ydSJRQoSa+lo3DikIDrXSYe1LRY5epMRrOq3cujuykuAVZQWp1vzv4w4V6mffmxaDbPpln/w28FKCxYkxG/WhwGuXR1GK6IWr3xpXPKcG+lzfvlmh4UiK1Lad/YD460oMXOKZT8apn4HL4tl9HOb6RyWQ==" diff --git a/utils/testutils.go b/utils/testutils.go new file mode 100644 index 0000000000..bfd9290b25 --- /dev/null +++ b/utils/testutils.go @@ -0,0 +1,55 @@ +package utils + +import ( + "sync" + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/golang/mock/gomock" + + "github.com/bloxapp/ssv/networkconfig" + mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" +) + +type SlotValue struct { + mu sync.Mutex + slot phase0.Slot +} + +func (sv *SlotValue) SetSlot(s phase0.Slot) { + sv.mu.Lock() + defer sv.mu.Unlock() + sv.slot = s +} + +func (sv *SlotValue) GetSlot() phase0.Slot { + sv.mu.Lock() + defer sv.mu.Unlock() + return sv.slot +} + +func SetupMockBeaconNetwork(t *testing.T, currentSlot *SlotValue) *mocknetwork.MockBeaconNetwork { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + if currentSlot == nil { + currentSlot = &SlotValue{} + currentSlot.SetSlot(32) + } + + mockBeaconNetwork := mocknetwork.NewMockBeaconNetwork(ctrl) + mockBeaconNetwork.EXPECT().GetBeaconNetwork().Return(networkconfig.TestNetwork.Beacon.GetBeaconNetwork()).AnyTimes() + + mockBeaconNetwork.EXPECT().EstimatedCurrentSlot().DoAndReturn( + func() phase0.Slot { + return currentSlot.GetSlot() + }, + ).AnyTimes() + mockBeaconNetwork.EXPECT().EstimatedEpochAtSlot(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) phase0.Epoch { + return phase0.Epoch(slot / 32) + }, + ).AnyTimes() + + return 
mockBeaconNetwork +} From f9d586917ba1617ff32706d61c2a1fd0165f3827 Mon Sep 17 00:00:00 2001 From: systemblox <40427708+systemblox@users.noreply.github.com> Date: Mon, 30 Oct 2023 13:22:41 +0200 Subject: [PATCH 25/54] Removed ci/cd for prater (#1182) Co-authored-by: stoyan.peev --- .gitlab-ci.yml | 46 ----- .k8/stage/boot-node-2-deployment.yml | 116 ----------- .k8/stage/boot-node-deployment.yml | 112 ----------- .k8/stage/lb-boot-node.yml | 19 -- .k8/stage/scripts/deploy-boot-nodes.sh | 128 ------------ .k8/stage/scripts/deploy-cluster-1--4.sh | 131 ------------ .k8/stage/scripts/deploy-cluster-5--8.sh | 131 ------------ .k8/stage/scripts/deploy-exporters.sh | 105 ---------- .k8/stage/scripts/deploy-holesky-exporters.sh | 104 ---------- .k8/stage/ssv-exporter-1.yml | 187 ------------------ .k8/stage/ssv-exporter-2.yml | 187 ------------------ .k8/stage/ssv-node-v2-1-deployment.yml | 161 --------------- .k8/stage/ssv-node-v2-2-deployment.yml | 165 ---------------- .k8/stage/ssv-node-v2-3-deployment.yml | 165 ---------------- .k8/stage/ssv-node-v2-4-deployment.yml | 165 ---------------- .k8/stage/ssv-node-v2-5-deployment.yml | 159 --------------- .k8/stage/ssv-node-v2-6-deployment.yml | 161 --------------- .k8/stage/ssv-node-v2-7-deployment.yml | 161 --------------- .k8/stage/ssv-node-v2-8-deployment.yml | 161 --------------- 19 files changed, 2564 deletions(-) delete mode 100644 .k8/stage/boot-node-2-deployment.yml delete mode 100644 .k8/stage/boot-node-deployment.yml delete mode 100644 .k8/stage/lb-boot-node.yml delete mode 100755 .k8/stage/scripts/deploy-boot-nodes.sh delete mode 100755 .k8/stage/scripts/deploy-cluster-1--4.sh delete mode 100755 .k8/stage/scripts/deploy-cluster-5--8.sh delete mode 100755 .k8/stage/scripts/deploy-exporters.sh delete mode 100755 .k8/stage/scripts/deploy-holesky-exporters.sh delete mode 100644 .k8/stage/ssv-exporter-1.yml delete mode 100644 .k8/stage/ssv-exporter-2.yml delete mode 100644 .k8/stage/ssv-node-v2-1-deployment.yml delete mode 100644 .k8/stage/ssv-node-v2-2-deployment.yml delete mode 100644 .k8/stage/ssv-node-v2-3-deployment.yml delete mode 100644 .k8/stage/ssv-node-v2-4-deployment.yml delete mode 100644 .k8/stage/ssv-node-v2-5-deployment.yml delete mode 100644 .k8/stage/ssv-node-v2-6-deployment.yml delete mode 100644 .k8/stage/ssv-node-v2-7-deployment.yml delete mode 100644 .k8/stage/ssv-node-v2-8-deployment.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f296289e6e..2faf940701 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -42,52 +42,6 @@ Build stage Docker image: only: - stage -Deploy nodes to stage: - stage: deploy - tags: - - blox-infra-stage - script: - - apk add bash - - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION - - export SSV_NODES_CPU_LIMIT=$STAGE_SSV_NODES_CPU_LIMIT - - export SSV_NODES_MEM_LIMIT=$STAGE_SSV_NODES_MEM_LIMIT - - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl - - chmod 755 kubectl - - mv kubectl /usr/bin/ - # - # +--------------------+ - # | Deploy SSV nodes | - # +--------------------+ - - .k8/stage/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT - - .k8/stage/scripts/deploy-cluster-5--8.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION 
$STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT - # - .k8/stage/scripts/deploy-cluster-9--12.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT - # - .k8/stage/scripts/deploy-cluster-13--16.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT - # - # +-------------------+ - # │ Deploy Bootnode | - # +-------------------+ - # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ - # - .k8/stage/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE 1000m 1000m - only: - - stage - -Deploy exporter to stage: - stage: deploy - tags: - - blox-infra-stage - script: - - apk add bash - - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION - - export SSV_EXPORTER_CPU_LIMIT=$STAGE_SSV_EXPORTER_CPU_LIMIT - - export SSV_EXPORTER_MEM_LIMIT=$STAGE_SSV_EXPORTER_MEM_LIMIT - - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl - - chmod 755 kubectl - - mv kubectl /usr/bin/ - - .k8/stage/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT - only: - - stage - - # +---------------------+ # | STAGE HETZNER NODES | # +---------------------+ diff --git a/.k8/stage/boot-node-2-deployment.yml b/.k8/stage/boot-node-2-deployment.yml deleted file mode 100644 index 48bb6d8e1e..0000000000 --- a/.k8/stage/boot-node-2-deployment.yml +++ /dev/null @@ -1,116 +0,0 @@ ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: boot-node-2 - namespace: REPLACE_NAMESPACE -spec: - hosts: - - "ssv.REPLACE_DOMAIN_SUFFIX" - gateways: - - boot-node-2 - http: - - route: - - destination: - host: boot-node-2-svc - port: - number: 5001 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: boot-node-2 - namespace: REPLACE_NAMESPACE -spec: - selector: - istio: ingressgateway - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "ssv.REPLACE_DOMAIN_SUFFIX" ---- -apiVersion: v1 -kind: Service -metadata: - name: boot-node-2-svc - namespace: REPLACE_NAMESPACE - labels: - app: boot-node-2 -spec: - type: ClusterIP - ports: - - port: 5679 - protocol: TCP - targetPort: 5679 - name: port-5679 - - port: 4001 - protocol: UDP - targetPort: 4001 - name: port-4001 - - port: 5001 - protocol: TCP - targetPort: 5001 - name: port-5001 - selector: - app: boot-node-2 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: boot-node-2 - name: boot-node-2 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: boot-node-2 - template: - metadata: - labels: - app: boot-node-2 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-boot-node - containers: - - name: boot-node-2 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - 
imagePullPolicy: Always - command: ["make", "start-boot-node"] - ports: - - containerPort: 5001 - name: port-5001 - hostPort: 5001 - env: - - name: BOOT_NODE_PRIVATE_KEY - valueFrom: - secretKeyRef: - name: config-secrets - key: boot_node_2_private_key - - name: BOOT_NODE_EXTERNAL_IP - valueFrom: - secretKeyRef: - name: config-secrets - key: boot_node_external_ip - - name: TCP_PORT - value: "5001" - - name: UDP_PORT - value: "4001" - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/boot-node-deployment.yml b/.k8/stage/boot-node-deployment.yml deleted file mode 100644 index 7b974cbc7d..0000000000 --- a/.k8/stage/boot-node-deployment.yml +++ /dev/null @@ -1,112 +0,0 @@ ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: boot-node - namespace: REPLACE_NAMESPACE -spec: - hosts: - - "ssv.REPLACE_DOMAIN_SUFFIX" - gateways: - - boot-node - http: - - route: - - destination: - host: boot-node-svc - port: - number: 5000 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: boot-node - namespace: REPLACE_NAMESPACE -spec: - selector: - istio: ingressgateway - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "ssv.REPLACE_DOMAIN_SUFFIX" ---- -apiVersion: v1 -kind: Service -metadata: - name: boot-node-svc - namespace: REPLACE_NAMESPACE - labels: - app: boot-node -spec: - type: ClusterIP - ports: - - port: 5678 - protocol: TCP - targetPort: 5678 - name: port-5678 - - port: 4000 - protocol: UDP - targetPort: 4000 - name: port-4000 - - port: 5000 - protocol: TCP - targetPort: 5000 - name: port-5000 - selector: - app: boot-node ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: boot-node - name: boot-node - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: boot-node - template: - metadata: - labels: - app: boot-node - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-boot-node - containers: - - name: boot-node - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - command: ["make", "start-boot-node"] - ports: - - containerPort: 5000 - name: port-5000 - hostPort: 5000 - env: - - name: BOOT_NODE_PRIVATE_KEY - valueFrom: - secretKeyRef: - name: config-secrets - key: boot_node_private_key - - name: BOOT_NODE_EXTERNAL_IP - valueFrom: - secretKeyRef: - name: config-secrets - key: boot_node_external_ip - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/lb-boot-node.yml b/.k8/stage/lb-boot-node.yml deleted file mode 100644 index 07f84a1d0b..0000000000 --- a/.k8/stage/lb-boot-node.yml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: boot-node-lb-svc - namespace: ssv - annotations: - "external-dns.alpha.kubernetes.io/hostname": ssv-lb.stage.bloxinfra.com - "service.beta.kubernetes.io/aws-load-balancer-internal": "0.0.0.0/0" - labels: - app: boot-node -spec: - type: LoadBalancer - ports: - - port: 5000 - protocol: TCP - targetPort: 5000 - name: port-5000 - selector: - app: boot-node diff --git a/.k8/stage/scripts/deploy-boot-nodes.sh b/.k8/stage/scripts/deploy-boot-nodes.sh deleted file mode 100755 index 738727c99d..0000000000 --- a/.k8/stage/scripts/deploy-boot-nodes.sh +++ /dev/null @@ 
-1,128 +0,0 @@ -#!/bin/bash - -set -x - -if [[ -z $1 ]]; then - echo "Please provide DOCKERREPO" - exit 1 -fi - -if [[ -z $2 ]]; then - echo "Please provide IMAGETAG" - exit 1 -fi - -if [[ -z $3 ]]; then - echo "Please provide NAMESPACE" - exit 1 -fi - -if [[ -z $4 ]]; then - echo "Please provide number of replicas" - exit 1 -fi - -if [[ -z $5 ]]; then - echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" - exit 1 -fi - -if [[ -z $6 ]]; then - echo "Please provide k8s context" - exit 1 -fi - -if [[ -z $7 ]]; then - echo "Pleae provide domain suffix" - exit 1 -fi - -if [[ -z ${8} ]]; then - echo "Please provide k8s app version" - exit 1 -fi - -if [[ -z $9 ]]; then - echo "Please provide health check image" - exit 1 -fi - -if [[ -z ${10} ]]; then - echo "Please provide nodes cpu limit" - exit 1 -fi - -if [[ -z ${11} ]]; then - echo "Please provide nodes mem limit" - exit 1 -fi - - -DOCKERREPO=$1 -IMAGETAG=$2 -NAMESPACE=$3 -REPLICAS=$4 -DEPL_TYPE=$5 -K8S_CONTEXT=$6 -DOMAIN_SUFFIX=$7 -K8S_API_VERSION=$8 -HEALTH_CHECK_IMAGE=$9 -NODES_CPU_LIMIT=${10} -NODES_MEM_LIMIT=${11} - - -echo $DOCKERREPO -echo $IMAGETAG -echo $NAMESPACE -echo $REPLICAS -echo $DEPL_TYPE -echo $K8S_CONTEXT -echo $DOMAIN_SUFFIX -echo $K8S_API_VERSION -echo $HEALTH_CHECK_IMAGE -echo $NODES_CPU_LIMIT -echo $NODES_MEM_LIMIT - -# create namespace if not exists -if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then - echo "$NAMESPACE created" - kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE -fi - -#config -#if [[ -d .k8/configmaps/ ]]; then -#config - #for file in $(ls -A1 .k8/configmaps/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" - #done -#fi - -#if [[ -d .k8/secrets/ ]]; then - #for file in $(ls -A1 .k8/secrets/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" - #done -#fi - -DIR=".k8/stage" -DEPLOY_FILES=( - "boot-node-deployment.yml" - "boot-node-2-deployment.yml" -) - -if [[ -d $DIR ]]; then - for file in "${DEPLOY_FILES[@]}"; do - sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ - -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ - -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ - -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ - -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ - -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ - -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ - -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 - done -fi - -#deploy -kubectl --context=$K8S_CONTEXT apply -f .k8/stage/boot-node-deployment.yml || exit 1 -kubectl --context=$K8S_CONTEXT apply -f .k8/stage/boot-node-2-deployment.yml || exit 1 \ No newline at end of file diff --git a/.k8/stage/scripts/deploy-cluster-1--4.sh b/.k8/stage/scripts/deploy-cluster-1--4.sh deleted file mode 100755 index 5516cb4e39..0000000000 --- a/.k8/stage/scripts/deploy-cluster-1--4.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash - -set -x - -if [[ -z $1 ]]; then - echo "Please provide DOCKERREPO" - exit 1 -fi - -if [[ -z $2 ]]; then - echo "Please provide IMAGETAG" - exit 1 -fi - -if [[ -z $3 ]]; then - echo "Please provide NAMESPACE" - exit 1 -fi - -if [[ -z $4 ]]; then - echo "Please provide number of replicas" - exit 1 -fi - -if [[ -z $5 ]]; then - echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" - exit 1 -fi - -if [[ -z $6 ]]; then - echo "Please provide k8s context" - exit 1 -fi - -if [[ -z $7 ]]; then - echo "Pleae provide domain suffix" - exit 1 -fi - -if 
[[ -z ${8} ]]; then - echo "Please provide k8s app version" - exit 1 -fi - -if [[ -z $9 ]]; then - echo "Please provide health check image" - exit 1 -fi - -if [[ -z ${10} ]]; then - echo "Please provide nodes cpu limit" - exit 1 -fi - -if [[ -z ${11} ]]; then - echo "Please provide nodes mem limit" - exit 1 -fi - - -DOCKERREPO=$1 -IMAGETAG=$2 -NAMESPACE=$3 -REPLICAS=$4 -DEPL_TYPE=$5 -K8S_CONTEXT=$6 -DOMAIN_SUFFIX=$7 -K8S_API_VERSION=$8 -HEALTH_CHECK_IMAGE=$9 -NODES_CPU_LIMIT=${10} -NODES_MEM_LIMIT=${11} - - -echo $DOCKERREPO -echo $IMAGETAG -echo $NAMESPACE -echo $REPLICAS -echo $DEPL_TYPE -echo $K8S_CONTEXT -echo $DOMAIN_SUFFIX -echo $K8S_API_VERSION -echo $HEALTH_CHECK_IMAGE -echo $NODES_CPU_LIMIT -echo $NODES_MEM_LIMIT - -# create namespace if not exists -if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then - echo "$NAMESPACE created" - kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE -fi - -#config -#if [[ -d .k8/configmaps/ ]]; then -#config - #for file in $(ls -A1 .k8/configmaps/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" - #done -#fi - -#if [[ -d .k8/secrets/ ]]; then - #for file in $(ls -A1 .k8/secrets/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" - #done -#fi - -DIR=".k8/stage" -DEPLOY_FILES=( - "ssv-node-v2-1-deployment.yml" - "ssv-node-v2-2-deployment.yml" - "ssv-node-v2-3-deployment.yml" - "ssv-node-v2-4-deployment.yml" -) - -if [[ -d $DIR ]]; then - for file in "${DEPLOY_FILES[@]}"; do - sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ - -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ - -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ - -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ - -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ - -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ - -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ - -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 - done -fi - -#deploy -for file in "${DEPLOY_FILES[@]}"; do - kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 -done diff --git a/.k8/stage/scripts/deploy-cluster-5--8.sh b/.k8/stage/scripts/deploy-cluster-5--8.sh deleted file mode 100755 index e3bbadd102..0000000000 --- a/.k8/stage/scripts/deploy-cluster-5--8.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash - -set -x - -if [[ -z $1 ]]; then - echo "Please provide DOCKERREPO" - exit 1 -fi - -if [[ -z $2 ]]; then - echo "Please provide IMAGETAG" - exit 1 -fi - -if [[ -z $3 ]]; then - echo "Please provide NAMESPACE" - exit 1 -fi - -if [[ -z $4 ]]; then - echo "Please provide number of replicas" - exit 1 -fi - -if [[ -z $5 ]]; then - echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" - exit 1 -fi - -if [[ -z $6 ]]; then - echo "Please provide k8s context" - exit 1 -fi - -if [[ -z $7 ]]; then - echo "Pleae provide domain suffix" - exit 1 -fi - -if [[ -z ${8} ]]; then - echo "Please provide k8s app version" - exit 1 -fi - -if [[ -z $9 ]]; then - echo "Please provide health check image" - exit 1 -fi - -if [[ -z ${10} ]]; then - echo "Please provide nodes cpu limit" - exit 1 -fi - -if [[ -z ${11} ]]; then - echo "Please provide nodes mem limit" - exit 1 -fi - - -DOCKERREPO=$1 -IMAGETAG=$2 -NAMESPACE=$3 -REPLICAS=$4 -DEPL_TYPE=$5 -K8S_CONTEXT=$6 -DOMAIN_SUFFIX=$7 -K8S_API_VERSION=$8 -HEALTH_CHECK_IMAGE=$9 -NODES_CPU_LIMIT=${10} -NODES_MEM_LIMIT=${11} - - -echo $DOCKERREPO -echo $IMAGETAG -echo $NAMESPACE -echo $REPLICAS -echo $DEPL_TYPE -echo 
$K8S_CONTEXT -echo $DOMAIN_SUFFIX -echo $K8S_API_VERSION -echo $HEALTH_CHECK_IMAGE -echo $NODES_CPU_LIMIT -echo $NODES_MEM_LIMIT - -# create namespace if not exists -if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then - echo "$NAMESPACE created" - kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE -fi - -#config -#if [[ -d .k8/configmaps/ ]]; then -#config - #for file in $(ls -A1 .k8/configmaps/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" - #done -#fi - -#if [[ -d .k8/secrets/ ]]; then - #for file in $(ls -A1 .k8/secrets/); do - #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" - #done -#fi - -DIR=".k8/stage" -DEPLOY_FILES=( - "ssv-node-v2-5-deployment.yml" - "ssv-node-v2-6-deployment.yml" - "ssv-node-v2-7-deployment.yml" - "ssv-node-v2-8-deployment.yml" -) - -if [[ -d $DIR ]]; then - for file in "${DEPLOY_FILES[@]}"; do - sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ - -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ - -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ - -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ - -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ - -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ - -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ - -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 - done -fi - -#deploy -for file in "${DEPLOY_FILES[@]}"; do - kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 -done diff --git a/.k8/stage/scripts/deploy-exporters.sh b/.k8/stage/scripts/deploy-exporters.sh deleted file mode 100755 index 051e527cfa..0000000000 --- a/.k8/stage/scripts/deploy-exporters.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash - -set -x - -if [[ -z $1 ]]; then - echo "Please provide DOCKERREPO" - exit 1 -fi - -if [[ -z $2 ]]; then - echo "Please provide IMAGETAG" - exit 1 -fi - -if [[ -z $3 ]]; then - echo "Please provide NAMESPACE" - exit 1 -fi - -if [[ -z $4 ]]; then - echo "Please provide number of replicas" - exit 1 -fi - -if [[ -z $5 ]]; then - echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" - exit 1 -fi - -if [[ -z $6 ]]; then - echo "Please provide k8s context" - exit 1 -fi - -if [[ -z $7 ]]; then - echo "Please provide domain suffix" - exit 1 -fi - -if [[ -z ${8} ]]; then - echo "Please provide k8s app version" - exit 1 -fi - -if [[ -z ${9} ]]; then - echo "Please provide exporter cpu limit" - exit 1 -fi - -if [[ -z ${10} ]]; then - echo "Please provide exporter cpu limit" - exit 1 -fi - -DOCKERREPO=$1 -IMAGETAG=$2 -NAMESPACE=$3 -REPLICAS=$4 -DEPL_TYPE=$5 -K8S_CONTEXT=$6 -DOMAIN_SUFFIX=$7 -K8S_API_VERSION=$8 -EXPORTER_CPU_LIMIT=$9 -EXPORTER_MEM_LIMIT=${10} - -echo $DOCKERREPO -echo $IMAGETAG -echo $NAMESPACE -echo $REPLICAS -echo $DEPL_TYPE -echo $K8S_CONTEXT -echo $DOMAIN_SUFFIX -echo $K8S_API_VERSION -echo $EXPORTER_CPU_LIMIT -echo $EXPORTER_MEM_LIMIT - -# create namespace if not exists -if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then - echo "$NAMESPACE created" - kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE -fi - -DIR=".k8/stage" -DEPLOY_FILES=( - "ssv-exporter-1.yml" - "ssv-exporter-2.yml" -) - -if [[ -d $DIR ]]; then - for file in "${DEPLOY_FILES[@]}"; do - sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ - -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ - -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ - -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ - -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ - -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ - -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 - done -fi - -#deploy -for file in "${DEPLOY_FILES[@]}"; do - kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 -done diff --git a/.k8/stage/scripts/deploy-holesky-exporters.sh b/.k8/stage/scripts/deploy-holesky-exporters.sh deleted file mode 100755 index 9a899ef3d3..0000000000 --- a/.k8/stage/scripts/deploy-holesky-exporters.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash - -set -x - -if [[ -z $1 ]]; then - echo "Please provide DOCKERREPO" - exit 1 -fi - -if [[ -z $2 ]]; then - echo "Please provide IMAGETAG" - exit 1 -fi - -if [[ -z $3 ]]; then - echo "Please provide NAMESPACE" - exit 1 -fi - -if [[ -z $4 ]]; then - echo "Please provide number of replicas" - exit 1 -fi - -if [[ -z $5 ]]; then - echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" - exit 1 -fi - -if [[ -z $6 ]]; then - echo "Please provide k8s context" - exit 1 -fi - -if [[ -z $7 ]]; then - echo "Please provide domain suffix" - exit 1 -fi - -if [[ -z ${8} ]]; then - echo "Please provide k8s app version" - exit 1 -fi - -if [[ -z ${9} ]]; then - echo "Please provide exporter cpu limit" - exit 1 -fi - -if [[ -z ${10} ]]; then - echo "Please provide exporter cpu limit" - exit 1 -fi - -DOCKERREPO=$1 -IMAGETAG=$2 -NAMESPACE=$3 -REPLICAS=$4 -DEPL_TYPE=$5 -K8S_CONTEXT=$6 -DOMAIN_SUFFIX=$7 -K8S_API_VERSION=$8 -EXPORTER_CPU_LIMIT=$9 -EXPORTER_MEM_LIMIT=${10} - -echo $DOCKERREPO -echo $IMAGETAG -echo $NAMESPACE -echo $REPLICAS -echo $DEPL_TYPE -echo $K8S_CONTEXT -echo $DOMAIN_SUFFIX -echo $K8S_API_VERSION -echo $EXPORTER_CPU_LIMIT -echo $EXPORTER_MEM_LIMIT - -# create namespace if not exists -if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then - echo "$NAMESPACE created" - kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE -fi - -DIR=".k8/hetzner-stage" -DEPLOY_FILES=( - "ssv-exporter-holesky.yml" -) - -if [[ -d $DIR ]]; then - for file in "${DEPLOY_FILES[@]}"; do - sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ - -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ - -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ - -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ - -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ - -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ - -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 - done -fi - -#deploy -for file in "${DEPLOY_FILES[@]}"; do - kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 -done diff --git a/.k8/stage/ssv-exporter-1.yml b/.k8/stage/ssv-exporter-1.yml deleted file mode 100644 index 8433e50901..0000000000 --- a/.k8/stage/ssv-exporter-1.yml +++ /dev/null @@ -1,187 +0,0 @@ ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: ssv-exporter - namespace: REPLACE_NAMESPACE -spec: - hosts: - - "ws-exporter.REPLACE_DOMAIN_SUFFIX" - gateways: - - ssv-exporter - http: - - route: - - destination: - host: ssv-exporter - port: - number: 14000 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: ssv-exporter - namespace: REPLACE_NAMESPACE -spec: - selector: - istio: ingressgateway-int - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "ws-exporter.REPLACE_DOMAIN_SUFFIX" ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-exporter - namespace: REPLACE_NAMESPACE - labels: - app: ssv-exporter -spec: - type: ClusterIP - ports: - - port: 12000 - protocol: UDP - targetPort: 12000 - name: port-12000 - - port: 13000 - protocol: TCP - targetPort: 13000 - name: port-13000 - - port: 14000 - protocol: TCP - targetPort: 14000 - name: port-14000 - - port: 15000 - protocol: TCP - targetPort: 15000 - name: port-15000 - - port: 16000 - protocol: TCP - targetPort: 16000 - name: port-16000 - selector: - app: ssv-exporter ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-exporter - name: ssv-exporter - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-exporter - template: - metadata: - labels: - app: ssv-exporter - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-exporter - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_EXPORTER_CPU_LIMIT - memory: REPLACE_EXPORTER_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12000 - name: port-12000 - hostPort: 12000 - protocol: UDP - - containerPort: 13000 - name: port-13000 - hostPort: 13000 - - containerPort: 14000 - name: port-14000 - hostPort: 14000 - - containerPort: 15000 - name: port-15000 - hostPort: 15000 - - containerPort: 16000 - name: port-16000 - hostPort: 16000 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: 
LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv.*" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15000" - - name: SSV_API_PORT - value: "16000" - - name: ENABLE_PROFILE - value: "true" - - name: UDP_PORT - value: "12000" - - name: TCP_PORT - value: "13000" - - name: WS_API_PORT - value: "14000" - - name: FULLNODE - value: "true" - - name: EXPORTER - value: "true" - - name: DISCOVERY_TRACE - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: SUBNETS - value: "0xffffffffffffffffffffffffffffffff" - volumeMounts: - - mountPath: /data - name: ssv-exporter - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-exporter-cm - volumes: - - name: ssv-exporter - persistentVolumeClaim: - claimName: ssv-exporter - - name: ssv-exporter-cm - configMap: - name: ssv-exporter-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-exporter-2.yml b/.k8/stage/ssv-exporter-2.yml deleted file mode 100644 index 7bcef4937a..0000000000 --- a/.k8/stage/ssv-exporter-2.yml +++ /dev/null @@ -1,187 +0,0 @@ ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: ssv-exporter-v2 - namespace: REPLACE_NAMESPACE -spec: - hosts: - - "ws-exporter-v2.REPLACE_DOMAIN_SUFFIX" - gateways: - - ssv-exporter-v2 - http: - - route: - - destination: - host: ssv-exporter-v2 - port: - number: 14007 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: ssv-exporter-v2 - namespace: REPLACE_NAMESPACE -spec: - selector: - istio: ingressgateway-int - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "ws-exporter-v2.REPLACE_DOMAIN_SUFFIX" ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-exporter-v2 - namespace: REPLACE_NAMESPACE - labels: - app: ssv-exporter-v2 -spec: - type: ClusterIP - ports: - - port: 12007 - protocol: UDP - targetPort: 12007 - name: port-12007 - - port: 13007 - protocol: TCP - targetPort: 13007 - name: port-13007 - - port: 14007 - protocol: TCP - targetPort: 14007 - name: port-14007 - - port: 15007 - protocol: TCP - targetPort: 15007 - name: port-15007 - - port: 16007 - protocol: TCP - targetPort: 16007 - name: port-16007 - selector: - app: ssv-exporter-v2 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-exporter-v2 - name: ssv-exporter-v2 - namespace: REPLACE_NAMESPACE -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-exporter-v2 - template: - metadata: - labels: - app: ssv-exporter-v2 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-exporter-v2 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_EXPORTER_CPU_LIMIT - memory: REPLACE_EXPORTER_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12007 - name: port-12007 - hostPort: 12007 - protocol: UDP - - containerPort: 13007 - name: port-13007 - hostPort: 13007 - - containerPort: 14007 - name: port-14007 - hostPort: 14007 - - containerPort: 15007 - name: port-15007 - hostPort: 15007 - - containerPort: 16007 - name: port-16007 - hostPort: 16007 - env: - - name: SHARE_CONFIG - 
value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv.*" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15007" - - name: SSV_API_PORT - value: "16007" - - name: ENABLE_PROFILE - value: "true" - - name: UDP_PORT - value: "12007" - - name: TCP_PORT - value: "13007" - - name: WS_API_PORT - value: "14007" - - name: FULLNODE - value: "true" - - name: EXPORTER - value: "true" - - name: DISCOVERY_TRACE - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: SUBNETS - value: "0xffffffffffffffffffffffffffffffff" - volumeMounts: - - mountPath: /data - name: ssv-exporter-v2 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-exporter-v2-cm - volumes: - - name: ssv-exporter-v2 - persistentVolumeClaim: - claimName: ssv-exporter-v2 - - name: ssv-exporter-v2-cm - configMap: - name: ssv-exporter-v2-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-1-deployment.yml b/.k8/stage/ssv-node-v2-1-deployment.yml deleted file mode 100644 index a62ac399cb..0000000000 --- a/.k8/stage/ssv-node-v2-1-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-1-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-1 -spec: - type: ClusterIP - ports: - - port: 12001 - protocol: UDP - targetPort: 12001 - name: port-12001 - - port: 13001 - protocol: TCP - targetPort: 13001 - name: port-13001 - - port: 15001 - protocol: TCP - targetPort: 15001 - name: port-15001 - - port: 16001 - protocol: TCP - targetPort: 16001 - name: port-16001 - selector: - app: ssv-node-v2-1 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-1 - name: ssv-node-v2-1 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-1 - template: - metadata: - labels: - app: ssv-node-v2-1 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-1 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12001 - name: port-12001 - hostPort: 12001 - protocol: UDP - - containerPort: 13001 - name: port-13001 - hostPort: 13001 - - containerPort: 15001 - name: port-15001 - hostPort: 15001 - - containerPort: 16001 - name: port-16001 - hostPort: 16001 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15001" - - name: SSV_API_PORT - value: "16001" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-1 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-1 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-1 - persistentVolumeClaim: - claimName: ssv-node-v2-1 - - name: ssv-cm-validator-options-1 - configMap: - name: ssv-cm-validator-options-1 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-2-deployment.yml b/.k8/stage/ssv-node-v2-2-deployment.yml deleted file mode 100644 index bc728de072..0000000000 --- a/.k8/stage/ssv-node-v2-2-deployment.yml +++ /dev/null @@ -1,165 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-2-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-2 -spec: - type: ClusterIP - ports: - - port: 12002 - protocol: UDP - targetPort: 12002 - name: port-12002 - - port: 13002 - protocol: TCP - targetPort: 13002 - name: port-13002 - - port: 15002 - protocol: TCP - targetPort: 15002 - name: port-15002 - - port: 16002 - protocol: TCP - targetPort: 16002 - name: port-16002 - selector: - app: ssv-node-v2-2 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-2 - name: ssv-node-v2-2 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-2 - template: - metadata: - labels: - app: ssv-node-v2-2 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-2 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12002 - name: port-12002 - protocol: UDP - hostPort: 12002 - - containerPort: 13002 - name: port-13002 - hostPort: 13002 - - containerPort: 15002 - name: port-15002 - hostPort: 15002 - - containerPort: 16002 - name: port-16002 - hostPort: 16002 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: TCP_PORT - value: "13002" - - name: UDP_PORT - value: "12002" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15002" - - name: SSV_API_PORT - value: "16002" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-2 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-2 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-2 - persistentVolumeClaim: - claimName: ssv-node-v2-2 - - name: ssv-cm-validator-options-2 - configMap: - name: ssv-cm-validator-options-2 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-3-deployment.yml b/.k8/stage/ssv-node-v2-3-deployment.yml deleted file mode 100644 index 81ca74db36..0000000000 --- a/.k8/stage/ssv-node-v2-3-deployment.yml +++ /dev/null @@ -1,165 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-3-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-3 -spec: - type: ClusterIP - ports: - - port: 12003 - protocol: UDP - targetPort: 12003 - name: port-12003 - - port: 13003 - protocol: TCP - targetPort: 13003 - name: port-13003 - - port: 15003 - protocol: TCP - targetPort: 15003 - name: port-15003 - - port: 16003 - protocol: TCP - targetPort: 16003 - name: port-16003 - selector: - app: ssv-node-v2-3 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-3 - name: ssv-node-v2-3 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-3 - template: - metadata: - labels: - app: ssv-node-v2-3 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-3 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12003 - name: port-12003 - protocol: UDP - hostPort: 12003 - - containerPort: 13003 - name: port-13003 - hostPort: 13003 - - containerPort: 15003 - name: port-15003 - hostPort: 15003 - - containerPort: 16003 - name: port-16003 - hostPort: 16003 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: 
"ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: TCP_PORT - value: "13003" - - name: UDP_PORT - value: "12003" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15003" - - name: SSV_API_PORT - value: "16003" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-3 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-3 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-3 - persistentVolumeClaim: - claimName: ssv-node-v2-3 - - name: ssv-cm-validator-options-3 - configMap: - name: ssv-cm-validator-options-3 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-4-deployment.yml b/.k8/stage/ssv-node-v2-4-deployment.yml deleted file mode 100644 index a1b98d28a1..0000000000 --- a/.k8/stage/ssv-node-v2-4-deployment.yml +++ /dev/null @@ -1,165 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-4-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-4 -spec: - type: ClusterIP - ports: - - port: 12004 - protocol: UDP - targetPort: 12004 - name: port-12004 - - port: 13004 - protocol: TCP - targetPort: 13004 - name: port-13004 - - port: 15004 - protocol: TCP - targetPort: 15004 - name: port-15004 - - port: 16004 - protocol: TCP - targetPort: 16004 - name: port-16004 - selector: - app: ssv-node-v2-4 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-4 - name: ssv-node-v2-4 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-4 - template: - metadata: - labels: - app: ssv-node-v2-4 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-4 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12004 - name: port-12004 - protocol: UDP - hostPort: 12004 - - containerPort: 13004 - name: port-13004 - hostPort: 13004 - - containerPort: 15004 - name: port-15004 - hostPort: 15004 - - containerPort: 16004 - name: port-16004 - hostPort: 16004 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - 
value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: TCP_PORT - value: "13004" - - name: UDP_PORT - value: "12004" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15004" - - name: SSV_API_PORT - value: "16004" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-4 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-4 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-4 - persistentVolumeClaim: - claimName: ssv-node-v2-4 - - name: ssv-cm-validator-options-4 - configMap: - name: ssv-cm-validator-options-4 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-5-deployment.yml b/.k8/stage/ssv-node-v2-5-deployment.yml deleted file mode 100644 index c7446bafdf..0000000000 --- a/.k8/stage/ssv-node-v2-5-deployment.yml +++ /dev/null @@ -1,159 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-5-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-5 -spec: - type: ClusterIP - ports: - - port: 12005 - protocol: UDP - targetPort: 12005 - name: port-12005 - - port: 13005 - protocol: TCP - targetPort: 13005 - name: port-13005 - - port: 15005 - protocol: TCP - targetPort: 15005 - name: port-15005 - - port: 16005 - protocol: TCP - targetPort: 16005 - name: port-16005 - selector: - app: ssv-node-v2-5 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-5 - name: ssv-node-v2-5 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-5 - template: - metadata: - labels: - app: ssv-node-v2-5 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-5 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: 4000m - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12005 - name: port-12005 - protocol: UDP - hostPort: 12005 - - containerPort: 13005 - name: port-13005 - hostPort: 13005 - - containerPort: 15005 - name: port-15005 - hostPort: 15005 - - containerPort: 16005 - name: port-16005 - hostPort: 16005 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DISCOVERY_TYPE_KEY - value: 
"discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15005" - - name: SSV_API_PORT - value: "16005" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "false" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-5 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-5 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-5 - persistentVolumeClaim: - claimName: ssv-node-v2-5 - - name: ssv-cm-validator-options-5 - configMap: - name: ssv-cm-validator-options-5 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-6-deployment.yml b/.k8/stage/ssv-node-v2-6-deployment.yml deleted file mode 100644 index b56673db9e..0000000000 --- a/.k8/stage/ssv-node-v2-6-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-6-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-6 -spec: - type: ClusterIP - ports: - - port: 12006 - protocol: UDP - targetPort: 12006 - name: port-12006 - - port: 13006 - protocol: TCP - targetPort: 13006 - name: port-13006 - - port: 15006 - protocol: TCP - targetPort: 15006 - name: port-15006 - - port: 16006 - protocol: TCP - targetPort: 16006 - name: port-16006 - selector: - app: ssv-node-v2-6 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-6 - name: ssv-node-v2-6 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-6 - template: - metadata: - labels: - app: ssv-node-v2-6 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-6 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: 4000m - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12006 - name: port-12006 - protocol: UDP - hostPort: 12006 - - containerPort: 13006 - name: port-13006 - hostPort: 13006 - - containerPort: 15006 - name: port-15006 - hostPort: 15006 - - containerPort: 16006 - name: port-16006 - hostPort: 16006 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15006" - - name: SSV_API_PORT - value: "16006" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-6 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-6 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-6 - persistentVolumeClaim: - claimName: ssv-node-v2-6 - - name: ssv-cm-validator-options-6 - configMap: - name: ssv-cm-validator-options-6 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-7-deployment.yml b/.k8/stage/ssv-node-v2-7-deployment.yml deleted file mode 100644 index 4e61986511..0000000000 --- a/.k8/stage/ssv-node-v2-7-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-7-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-7 -spec: - type: ClusterIP - ports: - - port: 12007 - protocol: UDP - targetPort: 12007 - name: port-12007 - - port: 13007 - protocol: TCP - targetPort: 13007 - name: port-13007 - - port: 15007 - protocol: TCP - targetPort: 15007 - name: port-15007 - - port: 16007 - protocol: TCP - targetPort: 16007 - name: port-16007 - selector: - app: ssv-node-v2-7 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-7 - name: ssv-node-v2-7 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-7 - template: - metadata: - labels: - app: ssv-node-v2-7 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-7 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: 4000m - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12007 - name: port-12007 - protocol: UDP - hostPort: 12007 - - containerPort: 13007 - name: port-13007 - hostPort: 13007 - - containerPort: 15007 - name: port-15007 - hostPort: 15007 - - containerPort: 16007 - name: port-16007 - hostPort: 16007 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15007" - - name: SSV_API_PORT - value: "16007" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "false" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-7 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-7 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-7 - persistentVolumeClaim: - claimName: ssv-node-v2-7 - - name: ssv-cm-validator-options-7 - configMap: - name: ssv-cm-validator-options-7 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-8-deployment.yml b/.k8/stage/ssv-node-v2-8-deployment.yml deleted file mode 100644 index 745fb3a3ea..0000000000 --- a/.k8/stage/ssv-node-v2-8-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-8-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-8 -spec: - type: ClusterIP - ports: - - port: 12008 - protocol: UDP - targetPort: 12008 - name: port-12008 - - port: 13008 - protocol: TCP - targetPort: 13008 - name: port-13008 - - port: 15008 - protocol: TCP - targetPort: 15008 - name: port-15008 - - port: 16008 - protocol: TCP - targetPort: 16008 - name: port-16008 - selector: - app: ssv-node-v2-8 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-8 - name: ssv-node-v2-8 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-8 - template: - metadata: - labels: - app: ssv-node-v2-8 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-8 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: 4000m - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12008 - name: port-12008 - protocol: UDP - hostPort: 12008 - - containerPort: 13008 - name: port-13008 - hostPort: 13008 - - containerPort: 15008 - name: port-15008 - hostPort: 15008 - - containerPort: 16008 - name: port-16008 - hostPort: 16008 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15008" - - name: SSV_API_PORT - value: "16008" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-8 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-8 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-8 - persistentVolumeClaim: - claimName: ssv-node-v2-8 - - name: ssv-cm-validator-options-8 - configMap: - name: ssv-cm-validator-options-8 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true From db963c90bb21d4044dd01c2ecdb1a863a2938c2d Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Mon, 30 Oct 2023 13:35:10 +0200 Subject: [PATCH 26/54] Domain Type Discovery (#1039) * go mod tidy * refactors & added tests * review changes --- network/discovery/dv5_service.go | 96 +++++++----- network/discovery/dv5_service_test.go | 140 ++++++++++++++++++ network/discovery/node_record.go | 33 +++++ network/discovery/options.go | 2 - network/discovery/service.go | 4 + network/p2p/p2p_setup.go | 1 - network/peers/conn_manager_test.go | 2 +- .../connections/mock/mock_connection_index.go | 32 ++++ network/peers/peers_index.go | 2 +- network/peers/subnets.go | 2 +- network/peers/subnets_test.go | 2 +- network/records/entries.go | 84 +++++++++++ network/records/subnets.go | 33 +---- 13 files changed, 355 insertions(+), 78 deletions(-) create mode 100644 network/discovery/dv5_service_test.go create mode 100644 network/discovery/node_record.go create mode 100644 network/peers/connections/mock/mock_connection_index.go create mode 100644 network/records/entries.go diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index ee35c3f794..7c1c58150f 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -3,6 +3,8 @@ package discovery import ( "bytes" "context" + "encoding/hex" + "fmt" "net" "sync/atomic" "time" @@ -13,6 +15,8 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/network/commons" @@ -54,7 +58,8 @@ type DiscV5Service struct { publishState int32 conn *net.UDPConn - subnets []byte + domainType spectypes.DomainType + subnets []byte } func newDiscV5Service(pctx context.Context, logger *zap.Logger, discOpts *Options) (Service, error) { @@ -65,6 +70,7 @@ func newDiscV5Service(pctx context.Context, logger *zap.Logger, discOpts *Option publishState: publishStateReady, conns: discOpts.ConnIndex, subnetsIdx: discOpts.SubnetsIdx, + domainType: discOpts.DomainType, 
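+		// subnets, like domainType above, is published in the local node's ENR
+		// (see createLocalNode) and matched against discovered peers in checkPeer.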
 		subnets:      discOpts.DiscV5Opts.Subnets,
 	}
@@ -121,37 +127,58 @@ func (dvs *DiscV5Service) Node(logger *zap.Logger, info peer.AddrInfo) (*enode.N
 // if we reached peers limit, make sure to accept peers with more than 1 shared subnet,
 // which lets other components determine whether we'll want to connect to this node or not.
 func (dvs *DiscV5Service) Bootstrap(logger *zap.Logger, handler HandleNewPeer) error {
-	zeroSubnets, _ := records.Subnets{}.FromString(records.ZeroSubnets)
+	logger = logger.Named(logging.NameDiscoveryService)

 	dvs.discover(dvs.ctx, func(e PeerEvent) {
-		nodeSubnets, err := records.GetSubnetsEntry(e.Node.Record())
+		logger := logger.With(
+			fields.ENR(e.Node),
+			fields.PeerID(e.AddrInfo.ID),
+		)
+		err := dvs.checkPeer(logger, e)
 		if err != nil {
-			logger.Debug("could not read subnets", fields.ENR(e.Node))
-			return
-		}
-		if bytes.Equal(zeroSubnets, nodeSubnets) {
-			logger.Debug("skipping zero subnets", fields.ENR(e.Node))
+			logger.Debug("discovered peer was dropped", zap.Error(err))
 			return
 		}
-		updated := dvs.subnetsIdx.UpdatePeerSubnets(e.AddrInfo.ID, nodeSubnets)
-		if updated {
-			logger.Debug("[discv5] peer subnets were updated", fields.ENR(e.Node),
-				fields.PeerID(e.AddrInfo.ID),
-				fields.Subnets(records.Subnets(nodeSubnets)))
-		}
-		if !dvs.limitNodeFilter(e.Node) {
-			if !dvs.sharedSubnetsFilter(1)(e.Node) {
-				metricRejectedNodes.Inc()
-				return
-			}
-		}
-		metricFoundNodes.Inc()
 		handler(e)
 	}, defaultDiscoveryInterval) // , dvs.forkVersionFilter) //, dvs.badNodeFilter)

 	return nil
 }

+var zeroSubnets, _ = records.Subnets{}.FromString(records.ZeroSubnets)
+
+func (dvs *DiscV5Service) checkPeer(logger *zap.Logger, e PeerEvent) error {
+	// Get the peer's domain type. A mismatch should cause a skip, but for now it is only logged.
+	// TODO: uncomment errors once there are sufficient nodes with domain type.
+	nodeDomainType, err := records.GetDomainTypeEntry(e.Node.Record())
+	if err != nil {
+		// return fmt.Errorf("could not read domain type: %w", err)
+		logger.Debug("could not read domain type entry", zap.Error(err))
+	} else if nodeDomainType != dvs.domainType {
+		// return errors.New("different domain type")
+		logger.Debug("domain type mismatch (peer not rejected yet)", zap.String("domain_type", hex.EncodeToString(nodeDomainType[:])))
+	} else {
+		logger.Debug("discovered node with matching domain type", zap.String("domain_type", hex.EncodeToString(nodeDomainType[:])))
+	}
+
+	// Get the peer's subnets, skipping if it has none.
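+	// A peer that advertises no subnets has nothing in common with us, so it is
+	// dropped before it takes up a connection slot.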
+	nodeSubnets, err := records.GetSubnetsEntry(e.Node.Record())
+	if err != nil {
+		return fmt.Errorf("could not read subnets: %w", err)
+	}
+	if bytes.Equal(zeroSubnets, nodeSubnets) {
+		return errors.New("zero subnets")
+	}
+
+	dvs.subnetsIdx.UpdatePeerSubnets(e.AddrInfo.ID, nodeSubnets)
+	if !dvs.limitNodeFilter(e.Node) && !dvs.sharedSubnetsFilter(1)(e.Node) {
+		metricRejectedNodes.Inc()
+		return errors.New("no shared subnets")
+	}
+	metricFoundNodes.Inc()
+	return nil
+}
+
 // initDiscV5Listener creates a new listener and starts it
 func (dvs *DiscV5Service) initDiscV5Listener(logger *zap.Logger, discOpts *Options) error {
 	opts := discOpts.DiscV5Opts
@@ -184,7 +211,7 @@ func (dvs *DiscV5Service) initDiscV5Listener(logger *zap.Logger, discOpts *Optio
 	dvs.bootnodes = dv5Cfg.Bootnodes

 	logger.Debug("started discv5 listener (UDP)", fields.BindIP(bindIP),
-		zap.Int("UdpPort", opts.Port), fields.ENRLocalNode(localNode), fields.OperatorIDStr(opts.OperatorID))
+		zap.Int("UdpPort", opts.Port), fields.ENRLocalNode(localNode), fields.Domain(discOpts.DomainType))

 	return nil
 }
@@ -306,31 +333,22 @@ func (dvs *DiscV5Service) createLocalNode(logger *zap.Logger, discOpts *Options,
 	if err != nil {
 		return nil, errors.Wrap(err, "could not add configured addresses")
 	}
-	err = DecorateNode(localNode, map[string]interface{}{
-		"operatorID": opts.OperatorID,
-		"subnets":    opts.Subnets,
-	})
+	err = DecorateNode(
+		localNode,
+
+		// Publish the record entries this node supports (domain type and subnets).
+		DecorateWithDomainType(dvs.domainType),
+		DecorateWithSubnets(opts.Subnets),
+	)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not decorate local node")
 	}

-	logger.Debug("node record is ready", fields.ENRLocalNode(localNode), fields.OperatorIDStr(opts.OperatorID), fields.Subnets(opts.Subnets))
+	logger.Debug("node record is ready", fields.ENRLocalNode(localNode), fields.Domain(dvs.domainType), fields.Subnets(opts.Subnets))

 	return localNode, nil
 }

-// DecorateNode will enrich the local node record with more entries, according to current fork
-func DecorateNode(node *enode.LocalNode, args map[string]interface{}) error {
-	var subnets []byte
-	raw, ok := args["subnets"]
-	if !ok {
-		subnets = make([]byte, commons.Subnets())
-	} else {
-		subnets = raw.([]byte)
-	}
-	return records.SetSubnetsEntry(node, subnets)
-}
-
 // newUDPListener creates a udp server
 func newUDPListener(bindIP net.IP, port int, network string) (*net.UDPConn, error) {
 	udpAddr := &net.UDPAddr{
diff --git a/network/discovery/dv5_service_test.go b/network/discovery/dv5_service_test.go
new file mode 100644
index 0000000000..7e0ed5c3ba
--- /dev/null
+++ b/network/discovery/dv5_service_test.go
@@ -0,0 +1,140 @@
+package discovery
+
+import (
+	"context"
+	"net"
+	"os"
+	"testing"
+
+	spectypes "github.com/bloxapp/ssv-spec/types"
+	"github.com/bloxapp/ssv/network/commons"
+	"github.com/bloxapp/ssv/network/peers"
+	"github.com/bloxapp/ssv/network/peers/connections/mock"
+	"github.com/bloxapp/ssv/network/records"
+	"github.com/bloxapp/ssv/utils"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+)
+
+func TestCheckPeer(t *testing.T) {
+	var (
+		ctx          = context.Background()
+		logger       = zap.NewNop()
+		myDomainType = spectypes.DomainType{0x1, 0x2, 0x3, 0x4}
+		mySubnets    = mockSubnets(1, 2, 3)
+		tests        = []*checkPeerTest{
+			{
+				name:          "valid",
+				domainType:    &myDomainType,
+				subnets:       mySubnets,
+				expectedError: nil,
+			},
+			{
+				name:          "missing domain type",
+				domainType:    nil,
+				subnets:       mySubnets,
+
expectedError: nil, + }, + { + name: "different domain type", + domainType: &spectypes.DomainType{0x1, 0x2, 0x3, 0x5}, + subnets: mySubnets, + expectedError: nil, + }, + { + name: "missing subnets", + domainType: &myDomainType, + subnets: nil, + expectedError: errors.New("could not read subnets"), + }, + { + name: "inactive subnets", + domainType: &myDomainType, + subnets: mockSubnets(), + expectedError: errors.New("zero subnets"), + }, + { + name: "no shared subnets", + domainType: &myDomainType, + subnets: mockSubnets(0, 4, 5), + expectedError: errors.New("no shared subnets"), + }, + { + name: "one shared subnet", + domainType: &myDomainType, + subnets: mockSubnets(0, 1, 4), + expectedError: nil, + }, + { + name: "two shared subnets", + domainType: &myDomainType, + subnets: mockSubnets(0, 1, 2), + expectedError: nil, + }, + } + ) + + // Create the LocalNode instances for the tests. + for _, test := range tests { + // Create a random network key. + priv, err := utils.ECDSAPrivateKey(logger, "") + require.NoError(t, err) + + // Create a temporary directory for storage. + tempDir := t.TempDir() + defer os.RemoveAll(tempDir) + + localNode, err := records.CreateLocalNode(priv, tempDir, net.ParseIP("127.0.0.1"), 12000, 13000) + require.NoError(t, err) + + if test.domainType != nil { + err := records.SetDomainTypeEntry(localNode, *test.domainType) + require.NoError(t, err) + } + if test.subnets != nil { + err := records.SetSubnetsEntry(localNode, test.subnets) + require.NoError(t, err) + } + + test.localNode = localNode + } + + // Run the tests. + subnetIndex := peers.NewSubnetsIndex(commons.Subnets()) + dvs := &DiscV5Service{ + ctx: ctx, + conns: &mock.MockConnectionIndex{LimitValue: true}, + subnetsIdx: subnetIndex, + domainType: myDomainType, + subnets: mySubnets, + } + + for _, test := range tests { + err := dvs.checkPeer(logger, PeerEvent{ + Node: test.localNode.Node(), + }) + if test.expectedError != nil { + require.ErrorContains(t, err, test.expectedError.Error(), test.name) + } else { + require.NoError(t, err, test.name) + } + } +} + +type checkPeerTest struct { + name string + domainType *spectypes.DomainType + subnets []byte + localNode *enode.LocalNode + expectedError error +} + +func mockSubnets(active ...int) []byte { + subnets := make([]byte, commons.Subnets()) + for _, subnet := range active { + subnets[subnet] = 1 + } + return subnets +} diff --git a/network/discovery/node_record.go b/network/discovery/node_record.go new file mode 100644 index 0000000000..d0e4d328b7 --- /dev/null +++ b/network/discovery/node_record.go @@ -0,0 +1,33 @@ +package discovery + +import ( + "fmt" + + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/network/records" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +type NodeRecordDecoration func(*enode.LocalNode) error + +func DecorateWithDomainType(domainType spectypes.DomainType) NodeRecordDecoration { + return func(node *enode.LocalNode) error { + return records.SetDomainTypeEntry(node, domainType) + } +} + +func DecorateWithSubnets(subnets []byte) NodeRecordDecoration { + return func(node *enode.LocalNode) error { + return records.SetSubnetsEntry(node, subnets) + } +} + +// DecorateNode will enrich the local node record with more entries, according to current fork +func DecorateNode(node *enode.LocalNode, decorations ...NodeRecordDecoration) error { + for _, decoration := range decorations { + if err := decoration(node); err != nil { + return fmt.Errorf("failed to decorate node record: %w", err) + } + } + return nil +} diff 
--git a/network/discovery/options.go b/network/discovery/options.go
index 0902d78514..63e3614f70 100644
--- a/network/discovery/options.go
+++ b/network/discovery/options.go
@@ -32,8 +32,6 @@ type DiscV5Options struct {
 	Bootnodes []string
 	// Subnets is a bool slice representing all the subnets the node is interested in
 	Subnets []byte
-	// OperatorID is the operator id (optional)
-	OperatorID string
 	// EnableLogging when true enables logs to be emitted
 	EnableLogging bool
 }
diff --git a/network/discovery/service.go b/network/discovery/service.go
index a44f910828..1d2da89815 100644
--- a/network/discovery/service.go
+++ b/network/discovery/service.go
@@ -10,6 +10,7 @@ import (
 	"github.com/libp2p/go-libp2p/core/peer"
 	"go.uber.org/zap"

+	spectypes "github.com/bloxapp/ssv-spec/types"
 	"github.com/bloxapp/ssv/network/peers"
 )

@@ -39,6 +40,9 @@ type Options struct {
 	SubnetsIdx  peers.SubnetsIndex
 	HostAddress string
 	HostDNS     string
+
+	// DomainType is the SSV network domain of the node
+	DomainType spectypes.DomainType
 }

 // Service is the interface for discovery
diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go
index 10a0e7cbc3..c0cb09daf1 100644
--- a/network/p2p/p2p_setup.go
+++ b/network/p2p/p2p_setup.go
@@ -246,7 +246,6 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error {
 		TCPPort:       n.cfg.TCPPort,
 		NetworkKey:    n.cfg.NetworkPrivateKey,
 		Bootnodes:     n.cfg.TransformBootnodes(),
-		OperatorID:    n.cfg.OperatorID,
 		EnableLogging: n.cfg.DiscoveryTrace,
 	}
 	if len(n.subnets) > 0 {
diff --git a/network/peers/conn_manager_test.go b/network/peers/conn_manager_test.go
index 6d6c3d4f5e..a65ebf3ae2 100644
--- a/network/peers/conn_manager_test.go
+++ b/network/peers/conn_manager_test.go
@@ -19,7 +19,7 @@ func TestTagBestPeers(t *testing.T) {
 	connMgrMock := newConnMgr()

 	allSubs, _ := records.Subnets{}.FromString(records.AllSubnets)
-	si := newSubnetsIndex(len(allSubs))
+	si := NewSubnetsIndex(len(allSubs))

 	cm := NewConnManager(zap.NewNop(), connMgrMock, si).(*connManager)

diff --git a/network/peers/connections/mock/mock_connection_index.go b/network/peers/connections/mock/mock_connection_index.go
new file mode 100644
index 0000000000..7960d80450
--- /dev/null
+++ b/network/peers/connections/mock/mock_connection_index.go
@@ -0,0 +1,32 @@
+package mock
+
+import (
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"go.uber.org/zap"
+)
+
+// MockConnectionIndex is a mock implementation of the ConnectionIndex interface
+type MockConnectionIndex struct {
+	LimitValue bool
+}
+
+// Connectedness panics if called
+func (m *MockConnectionIndex) Connectedness(id peer.ID) network.Connectedness {
+	panic("Connectedness method is not implemented in MockConnectionIndex")
+}
+
+// CanConnect panics if called
+func (m *MockConnectionIndex) CanConnect(id peer.ID) bool {
+	panic("CanConnect method is not implemented in MockConnectionIndex")
+}
+
+// Limit returns the mock value for Limit
+func (m *MockConnectionIndex) Limit(dir network.Direction) bool {
+	return m.LimitValue
+}
+
+// IsBad panics if called
+func (m *MockConnectionIndex) IsBad(logger *zap.Logger, id peer.ID) bool {
+	panic("IsBad method is not implemented in MockConnectionIndex")
+}
diff --git a/network/peers/peers_index.go b/network/peers/peers_index.go
index 06d846098b..5ba5fc535a 100644
--- a/network/peers/peers_index.go
+++ b/network/peers/peers_index.go
@@ -45,7 +45,7 @@ func NewPeersIndex(logger *zap.Logger, network libp2pnetwork.Network, self *reco
 	return &peersIndex{
 		network:  network,
 		scoreIdx:
newScoreIndex(),
-		SubnetsIndex: newSubnetsIndex(subnetsCount),
+		SubnetsIndex: NewSubnetsIndex(subnetsCount),
 		PeerInfoIndex: NewPeerInfoIndex(),
 		self:          self,
 		selfLock:      &sync.RWMutex{},
diff --git a/network/peers/subnets.go b/network/peers/subnets.go
index f9f68cfb44..7b1b5369f5 100644
--- a/network/peers/subnets.go
+++ b/network/peers/subnets.go
@@ -16,7 +16,7 @@ type subnetsIndex struct {
 	lock *sync.RWMutex
 }

-func newSubnetsIndex(count int) SubnetsIndex {
+func NewSubnetsIndex(count int) SubnetsIndex {
 	return &subnetsIndex{
 		subnets:     make([][]peer.ID, count),
 		peerSubnets: map[peer.ID]records.Subnets{},
diff --git a/network/peers/subnets_test.go b/network/peers/subnets_test.go
index 3460d92a93..b28d62e04e 100644
--- a/network/peers/subnets_test.go
+++ b/network/peers/subnets_test.go
@@ -33,7 +33,7 @@ func TestSubnetsIndex(t *testing.T) {
 	sPartial, err := records.Subnets{}.FromString("0x57b080fffd743d9878dc41a184ab160a")
 	require.NoError(t, err)

-	subnetsIdx := newSubnetsIndex(128)
+	subnetsIdx := NewSubnetsIndex(128)

 	subnetsIdx.UpdatePeerSubnets(pids[0], sAll.Clone())
 	subnetsIdx.UpdatePeerSubnets(pids[1], sNone.Clone())
diff --git a/network/records/entries.go b/network/records/entries.go
new file mode 100644
index 0000000000..ef88047d90
--- /dev/null
+++ b/network/records/entries.go
@@ -0,0 +1,84 @@
+package records
+
+import (
+	"io"
+
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/enr"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/go-bitfield"
+
+	spectypes "github.com/bloxapp/ssv-spec/types"
+)
+
+var ErrEntryNotFound = errors.New("not found")
+
+// DomainTypeEntry holds the domain type of the node
+type DomainTypeEntry spectypes.DomainType
+
+// ENRKey implements enr.Entry, returns the entry key
+func (dt DomainTypeEntry) ENRKey() string { return "domaintype" }
+
+// EncodeRLP implements rlp.Encoder, encodes domain type as bytes
+func (dt DomainTypeEntry) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, dt[:])
+}
+
+// DecodeRLP implements rlp.Decoder, decodes domain type from bytes
+func (dt *DomainTypeEntry) DecodeRLP(s *rlp.Stream) error {
+	var buf []byte
+	if err := s.Decode(&buf); err != nil {
+		return err
+	}
+	copy(dt[:], buf) // copy instead of a slice-to-array conversion, which panics on short input
+	return nil
+}
+
+// SetDomainTypeEntry adds domain type entry to the node
+func SetDomainTypeEntry(node *enode.LocalNode, domainType spectypes.DomainType) error {
+	node.Set(DomainTypeEntry(domainType))
+	return nil
+}
+
+// GetDomainTypeEntry extracts the value of domain type entry
+func GetDomainTypeEntry(record *enr.Record) (spectypes.DomainType, error) {
+	dt := new(DomainTypeEntry)
+	if err := record.Load(dt); err != nil {
+		if enr.IsNotFound(err) {
+			return spectypes.DomainType{}, ErrEntryNotFound
+		}
+		return spectypes.DomainType{}, err
+	}
+	return spectypes.DomainType(*dt), nil
+}
+
+// SetSubnetsEntry adds subnets entry to our enode.LocalNode
+func SetSubnetsEntry(node *enode.LocalNode, subnets []byte) error {
+	subnetsVec := bitfield.NewBitvector128()
+	for i, subnet := range subnets {
+		subnetsVec.SetBitAt(uint64(i), subnet > 0)
+	}
+	node.Set(enr.WithEntry("subnets", &subnetsVec))
+	return nil
+}
+
+// GetSubnetsEntry extracts the value of subnets entry from some record
+func GetSubnetsEntry(record *enr.Record) ([]byte, error) {
+	subnetsVec := bitfield.NewBitvector128()
+	if err := record.Load(enr.WithEntry("subnets", &subnetsVec)); err != nil {
+		if enr.IsNotFound(err) {
+			return nil, ErrEntryNotFound
+		}
+		return nil, err
+	}
+	res :=
make([]byte, 0, subnetsVec.Len()) + for i := uint64(0); i < subnetsVec.Len(); i++ { + val := byte(0) + if subnetsVec.BitAt(i) { + val = 1 + } + res = append(res, val) + } + return res, nil +} diff --git a/network/records/subnets.go b/network/records/subnets.go index 9bba0d46ae..58dabb5c7d 100644 --- a/network/records/subnets.go +++ b/network/records/subnets.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" "github.com/pkg/errors" "github.com/prysmaticlabs/go-bitfield" ) @@ -24,7 +23,7 @@ const ( // count is the amount of subnets, in case that the entry doesn't exist as we want to initialize it func UpdateSubnets(node *enode.LocalNode, count int, added []int, removed []int) ([]byte, error) { subnets, err := GetSubnetsEntry(node.Node().Record()) - if err != nil { + if err != nil && !errors.Is(err, ErrEntryNotFound) { return nil, errors.Wrap(err, "could not read subnets entry") } orig := make([]byte, len(subnets)) @@ -48,36 +47,6 @@ func UpdateSubnets(node *enode.LocalNode, count int, added []int, removed []int) return subnets, nil } -// SetSubnetsEntry adds subnets entry to our enode.LocalNode -func SetSubnetsEntry(node *enode.LocalNode, subnets []byte) error { - subnetsVec := bitfield.NewBitvector128() - for i, subnet := range subnets { - subnetsVec.SetBitAt(uint64(i), subnet > 0) - } - node.Set(enr.WithEntry("subnets", &subnetsVec)) - return nil -} - -// GetSubnetsEntry extracts the value of subnets entry from some record -func GetSubnetsEntry(record *enr.Record) ([]byte, error) { - subnetsVec := bitfield.NewBitvector128() - if err := record.Load(enr.WithEntry("subnets", &subnetsVec)); err != nil { - if enr.IsNotFound(err) { - return nil, nil - } - return nil, err - } - res := make([]byte, 0, subnetsVec.Len()) - for i := uint64(0); i < subnetsVec.Len(); i++ { - val := byte(0) - if subnetsVec.BitAt(i) { - val = 1 - } - res = append(res, val) - } - return res, nil -} - // Subnets holds all the subscribed subnets of a specific node type Subnets []byte From de5f8d4f7a489982e58011318e6a0379a2c8470f Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Mon, 30 Oct 2023 12:54:39 +0100 Subject: [PATCH 27/54] Remove non committee validator message processing (#1183) * only process ncv for exporters * deploy to 1-4 * deploy to all the rest * fix test - run ncv only for exporter --------- Co-authored-by: y0sher --- operator/validator/controller.go | 2 +- operator/validator/controller_test.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 604e1fbb55..0218c1862c 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -314,7 +314,7 @@ func (c *controller) handleRouterMessages() { hexPK := hex.EncodeToString(pk) if v, ok := c.validatorsMap.GetValidator(hexPK); ok { v.HandleMessage(c.logger, msg) - } else { + } else if c.validatorOptions.Exporter { if msg.MsgType != spectypes.SSVConsensusMsgType { continue // not supporting other types } diff --git a/operator/validator/controller_test.go b/operator/validator/controller_test.go index 2135d24ff3..f0e5ccfb3a 100644 --- a/operator/validator/controller_test.go +++ b/operator/validator/controller_test.go @@ -29,6 +29,10 @@ import ( func TestHandleNonCommitteeMessages(t *testing.T) { logger := logging.TestLogger(t) ctr := setupController(logger, map[string]*validator.Validator{}) // none committee + + // Only exporter handles non committee messages + 
ctr.validatorOptions.Exporter = true + go ctr.handleRouterMessages() var wg sync.WaitGroup From 98112781407c026f4f4e6eec30498a8d6484e317 Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Tue, 31 Oct 2023 19:12:01 +0100 Subject: [PATCH 28/54] Reduce heavy metrics collection (#1184) * test comment metrics * test comment metrics * Attempt to fix heavy metrics removal * remove pk from metrics to reduce size and load * revert gitlab ci --------- Co-authored-by: Nikita Kryuchkov --- network/p2p/metrics.go | 2 +- network/p2p/p2p_pubsub.go | 6 +++--- protocol/v2/qbft/instance/metrics.go | 14 ++++++-------- protocol/v2/ssv/runner/metrics/metrics.go | 3 --- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/network/p2p/metrics.go b/network/p2p/metrics.go index 843374774b..9792d394a1 100644 --- a/network/p2p/metrics.go +++ b/network/p2p/metrics.go @@ -32,7 +32,7 @@ var ( metricsRouterIncoming = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:network:router:in", Help: "Counts incoming messages", - }, []string{"identifier", "mt"}) + }, []string{"mt"}) ) func init() { diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go index d88be4af21..46493376f8 100644 --- a/network/p2p/p2p_pubsub.go +++ b/network/p2p/p2p_pubsub.go @@ -4,6 +4,7 @@ import ( "context" "encoding/hex" "fmt" + "github.com/bloxapp/ssv/protocol/v2/message" spectypes "github.com/bloxapp/ssv-spec/types" pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -15,7 +16,6 @@ import ( "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/records" - "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) @@ -155,14 +155,14 @@ func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(ctx context.C return errors.New("message was not decoded") } - p2pID := decodedMsg.GetID().String() + //p2pID := decodedMsg.GetID().String() // logger.With( // zap.String("pubKey", hex.EncodeToString(ssvMsg.MsgID.GetPubKey())), // zap.String("role", ssvMsg.MsgID.GetRoleType().String()), // ).Debug("handlePubsubMessages") - metricsRouterIncoming.WithLabelValues(p2pID, message.MsgTypeToString(decodedMsg.MsgType)).Inc() + metricsRouterIncoming.WithLabelValues(message.MsgTypeToString(decodedMsg.MsgType)).Inc() n.msgRouter.Route(ctx, decodedMsg) diff --git a/protocol/v2/qbft/instance/metrics.go b/protocol/v2/qbft/instance/metrics.go index e32e49a872..246fbad291 100644 --- a/protocol/v2/qbft/instance/metrics.go +++ b/protocol/v2/qbft/instance/metrics.go @@ -1,7 +1,6 @@ package instance import ( - "encoding/hex" "time" specqbft "github.com/bloxapp/ssv-spec/qbft" @@ -16,11 +15,11 @@ var ( Name: "ssv_validator_instance_stage_duration_seconds", Help: "Instance stage duration (seconds)", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 1.5, 2, 5}, - }, []string{"stage", "pubKey"}) + }, []string{"stage"}) metricsRound = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "ssv_qbft_instance_round", Help: "QBFT instance round", - }, []string{"roleType", "pubKey"}) + }, []string{"roleType"}) ) func init() { @@ -45,12 +44,11 @@ type metrics struct { } func newMetrics(msgID spectypes.MessageID) *metrics { - hexPubKey := hex.EncodeToString(msgID.GetPubKey()) return &metrics{ - proposalDuration: metricsStageDuration.WithLabelValues("proposal", hexPubKey), - prepareDuration: metricsStageDuration.WithLabelValues("prepare", hexPubKey), - commitDuration: metricsStageDuration.WithLabelValues("commit", hexPubKey), - 
round: metricsRound.WithLabelValues(msgID.GetRoleType().String(), hexPubKey), + proposalDuration: metricsStageDuration.WithLabelValues("proposal"), + prepareDuration: metricsStageDuration.WithLabelValues("prepare"), + commitDuration: metricsStageDuration.WithLabelValues("commit"), + round: metricsRound.WithLabelValues(msgID.GetRoleType().String()), } } diff --git a/protocol/v2/ssv/runner/metrics/metrics.go b/protocol/v2/ssv/runner/metrics/metrics.go index 5251ed909e..56ae65ed0f 100644 --- a/protocol/v2/ssv/runner/metrics/metrics.go +++ b/protocol/v2/ssv/runner/metrics/metrics.go @@ -45,9 +45,6 @@ var ( 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, - 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8, 5.9, 6.0, - 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7.0, - 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 7.8, 7.9, 8.0, }, }, []string{"role"}) metricsRolesSubmitted = promauto.NewCounterVec(prometheus.CounterOpts{ From 6345ba67fcd1d6f9e2920aab73cc9f9223efb2e0 Mon Sep 17 00:00:00 2001 From: Taiga <125817027+zktaiga@users.noreply.github.com> Date: Thu, 2 Nov 2023 17:15:06 +0400 Subject: [PATCH 29/54] Fix scraping typo (#1189) Co-authored-by: Lior Rutenberg --- .k8/hetzner-stage/ssv-node-58-deployment.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml index 21401421dd..d3d20846e4 100644 --- a/.k8/hetzner-stage/ssv-node-58-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml @@ -17,10 +17,10 @@ spec: protocol: TCP targetPort: 13058 name: port-13058 - - port: 15858 + - port: 15058 protocol: TCP - targetPort: 15858 - name: port-15858 + targetPort: 15058 + name: port-15058 - port: 16058 protocol: TCP targetPort: 16058 @@ -65,9 +65,9 @@ spec: - containerPort: 13058 name: port-13058 hostPort: 13058 - - containerPort: 15858 - name: port-15858 - hostPort: 15858 + - containerPort: 15058 + name: port-15058 + hostPort: 15058 - containerPort: 16058 name: port-16058 hostPort: 16058 @@ -104,7 +104,7 @@ spec: - name: DB_REPORTING value: "false" - name: METRICS_API_PORT - value: "15858" + value: "15058" - name: SSV_API_PORT value: "16058" - name: ENABLE_PROFILE From 3278f9822558bd210f33fe8e7fb95a16e63bc6ca Mon Sep 17 00:00:00 2001 From: Taiga Date: Thu, 2 Nov 2023 19:14:45 +0400 Subject: [PATCH 30/54] Use generic name for metrics port --- .k8/hetzner-stage/ssv-node-1-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-10-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-11-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-12-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-13-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-14-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-15-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-16-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-17-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-18-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-19-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-2-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-20-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-21-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-22-deployment.yml | 160 +++++++++---------- 
.k8/hetzner-stage/ssv-node-23-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-24-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-25-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-26-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-27-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-28-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-29-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-3-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-30-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-31-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-32-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-33-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-34-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-35-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-36-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-37-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-38-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-39-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-4-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-40-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-41-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-42-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-43-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-44-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-45-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-46-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-47-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-48-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-49-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-5-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-50-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-51-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-52-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-53-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-54-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-55-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-56-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-57-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-58-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-59-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-6-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-60-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-61-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-62-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-63-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-64-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-65-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-66-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-67-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-68-deployment.yml | 160 +++++++++---------- 
.k8/hetzner-stage/ssv-node-69-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-7-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-70-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-71-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-72-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-8-deployment.yml | 160 +++++++++---------- .k8/hetzner-stage/ssv-node-9-deployment.yml | 160 +++++++++---------- 72 files changed, 5760 insertions(+), 5760 deletions(-) diff --git a/.k8/hetzner-stage/ssv-node-1-deployment.yml b/.k8/hetzner-stage/ssv-node-1-deployment.yml index 9b11ffbce6..3287e66997 100644 --- a/.k8/hetzner-stage/ssv-node-1-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-1-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15001 protocol: TCP targetPort: 15001 - name: port-15001 + name: metrics - port: 16001 protocol: TCP targetPort: 16001 @@ -48,86 +48,86 @@ spec: app: ssv-node-1 spec: containers: - - name: ssv-node-1 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12001 - name: port-12001 - hostPort: 12001 - protocol: UDP - - containerPort: 13001 - name: port-13001 - hostPort: 13001 - - containerPort: 15001 - name: port-15001 - hostPort: 15001 - - containerPort: 16001 - name: port-16001 - hostPort: 16001 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15001" - - name: SSV_API_PORT - value: "16001" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-1 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-1-cm + - name: ssv-node-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12001 + name: port-12001 + hostPort: 12001 + protocol: UDP + - containerPort: 13001 + name: port-13001 + hostPort: 13001 + - containerPort: 15001 + name: port-15001 + hostPort: 15001 + - containerPort: 16001 + name: port-16001 + hostPort: 16001 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
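+          # discv5 is the UDP-based peer discovery protocol the node runs;
+          # the domain type discovery changes earlier in this series build on it.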
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15001" + - name: SSV_API_PORT + value: "16001" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-1 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-1-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-1 - persistentVolumeClaim: - claimName: ssv-node-1 - - name: ssv-node-1-cm - configMap: - name: ssv-node-1-cm + - name: ssv-node-1 + persistentVolumeClaim: + claimName: ssv-node-1 + - name: ssv-node-1-cm + configMap: + name: ssv-node-1-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-10-deployment.yml b/.k8/hetzner-stage/ssv-node-10-deployment.yml index 051cf589d4..194eb85750 100644 --- a/.k8/hetzner-stage/ssv-node-10-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-10-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15010 protocol: TCP targetPort: 15010 - name: port-15010 + name: metrics - port: 16010 protocol: TCP targetPort: 16010 @@ -48,86 +48,86 @@ spec: app: ssv-node-10 spec: containers: - - name: ssv-node-10 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12010 - name: port-12010 - protocol: UDP - hostPort: 12010 - - containerPort: 13010 - name: port-13010 - hostPort: 13010 - - containerPort: 15010 - name: port-15010 - hostPort: 15010 - - containerPort: 16010 - name: port-16010 - hostPort: 16010 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15010" - - name: SSV_API_PORT - value: "16010" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-10 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-10-cm + - name: ssv-node-10 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12010 + name: port-12010 + protocol: UDP + hostPort: 12010 + - containerPort: 13010 + name: port-13010 + hostPort: 13010 + - containerPort: 15010 + name: port-15010 + hostPort: 15010 + - containerPort: 16010 + name: port-16010 + hostPort: 16010 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15010" + - name: SSV_API_PORT + value: "16010" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-10 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-10-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-10 - persistentVolumeClaim: - claimName: ssv-node-10 - - name: ssv-node-10-cm - configMap: - name: ssv-node-10-cm + - name: ssv-node-10 + persistentVolumeClaim: + claimName: ssv-node-10 + - name: ssv-node-10-cm + configMap: + name: ssv-node-10-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-11-deployment.yml b/.k8/hetzner-stage/ssv-node-11-deployment.yml index e15bdb7b49..2fcd34b6bc 100644 --- a/.k8/hetzner-stage/ssv-node-11-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-11-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15011 protocol: TCP targetPort: 15011 - name: port-15011 + name: metrics - port: 16011 protocol: TCP targetPort: 16011 @@ -48,86 +48,86 @@ spec: app: ssv-node-11 spec: containers: - - name: ssv-node-11 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12011 - name: port-12011 - protocol: UDP - hostPort: 12011 - - containerPort: 13011 - name: port-13011 - hostPort: 13011 - - containerPort: 15011 - name: port-15011 - hostPort: 15011 - - containerPort: 16011 - name: port-16011 - hostPort: 16011 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15011" - - name: SSV_API_PORT - value: "16011" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-11 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-11-cm + - name: ssv-node-11 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12011 + name: port-12011 + protocol: UDP + hostPort: 12011 + - containerPort: 13011 + name: port-13011 + hostPort: 13011 + - containerPort: 15011 + name: port-15011 + hostPort: 15011 + - containerPort: 16011 + name: port-16011 + hostPort: 16011 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15011" + - name: SSV_API_PORT + value: "16011" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-11 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-11-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-11 - persistentVolumeClaim: - claimName: ssv-node-11 - - name: ssv-node-11-cm - configMap: - name: ssv-node-11-cm + - name: ssv-node-11 + persistentVolumeClaim: + claimName: ssv-node-11 + - name: ssv-node-11-cm + configMap: + name: ssv-node-11-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-12-deployment.yml b/.k8/hetzner-stage/ssv-node-12-deployment.yml index ebcc12a1ac..2f29d4be43 100644 --- a/.k8/hetzner-stage/ssv-node-12-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-12-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15012 protocol: TCP targetPort: 15012 - name: port-15012 + name: metrics - port: 16012 protocol: TCP targetPort: 16012 @@ -48,86 +48,86 @@ spec: app: ssv-node-12 spec: containers: - - name: ssv-node-12 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12012 - name: port-12012 - protocol: UDP - hostPort: 12012 - - containerPort: 13012 - name: port-13012 - hostPort: 13012 - - containerPort: 15012 - name: port-15012 - hostPort: 15012 - - containerPort: 16012 - name: port-16012 - hostPort: 16012 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15012" - - name: SSV_API_PORT - value: "16012" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-12 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-12-cm + - name: ssv-node-12 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12012 + name: port-12012 + protocol: UDP + hostPort: 12012 + - containerPort: 13012 + name: port-13012 + hostPort: 13012 + - containerPort: 15012 + name: port-15012 + hostPort: 15012 + - containerPort: 16012 + name: port-16012 + hostPort: 16012 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15012" + - name: SSV_API_PORT + value: "16012" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-12 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-12-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-12 - persistentVolumeClaim: - claimName: ssv-node-12 - - name: ssv-node-12-cm - configMap: - name: ssv-node-12-cm + - name: ssv-node-12 + persistentVolumeClaim: + claimName: ssv-node-12 + - name: ssv-node-12-cm + configMap: + name: ssv-node-12-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-13-deployment.yml b/.k8/hetzner-stage/ssv-node-13-deployment.yml index 53f1bae513..a6d64d39b0 100644 --- a/.k8/hetzner-stage/ssv-node-13-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-13-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15013 protocol: TCP targetPort: 15013 - name: port-15013 + name: metrics - port: 16013 protocol: TCP targetPort: 16013 @@ -48,86 +48,86 @@ spec: app: ssv-node-13 spec: containers: - - name: ssv-node-13 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12013 - name: port-12013 - protocol: UDP - hostPort: 12013 - - containerPort: 13013 - name: port-13013 - hostPort: 13013 - - containerPort: 15013 - name: port-15013 - hostPort: 15013 - - containerPort: 16013 - name: port-16013 - hostPort: 16013 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15013" - - name: SSV_API_PORT - value: "16013" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-13 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-13-cm + - name: ssv-node-13 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12013 + name: port-12013 + protocol: UDP + hostPort: 12013 + - containerPort: 13013 + name: port-13013 + hostPort: 13013 + - containerPort: 15013 + name: port-15013 + hostPort: 15013 + - containerPort: 16013 + name: port-16013 + hostPort: 16013 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15013" + - name: SSV_API_PORT + value: "16013" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-13 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-13-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-13 - persistentVolumeClaim: - claimName: ssv-node-13 - - name: ssv-node-13-cm - configMap: - name: ssv-node-13-cm + - name: ssv-node-13 + persistentVolumeClaim: + claimName: ssv-node-13 + - name: ssv-node-13-cm + configMap: + name: ssv-node-13-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-14-deployment.yml b/.k8/hetzner-stage/ssv-node-14-deployment.yml index 65f47bc363..113d4abc7a 100644 --- a/.k8/hetzner-stage/ssv-node-14-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-14-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15014 protocol: TCP targetPort: 15014 - name: port-15014 + name: metrics - port: 16014 protocol: TCP targetPort: 16014 @@ -48,86 +48,86 @@ spec: app: ssv-node-14 spec: containers: - - name: ssv-node-14 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12014 - name: port-12014 - protocol: UDP - hostPort: 12014 - - containerPort: 13014 - name: port-13014 - hostPort: 13014 - - containerPort: 15014 - name: port-15014 - hostPort: 15014 - - containerPort: 16014 - name: port-16014 - hostPort: 16014 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15014" - - name: SSV_API_PORT - value: "16014" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-14 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-14-cm + - name: ssv-node-14 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12014 + name: port-12014 + protocol: UDP + hostPort: 12014 + - containerPort: 13014 + name: port-13014 + hostPort: 13014 + - containerPort: 15014 + name: port-15014 + hostPort: 15014 + - containerPort: 16014 + name: port-16014 + hostPort: 16014 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15014" + - name: SSV_API_PORT + value: "16014" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-14 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-14-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-14 - persistentVolumeClaim: - claimName: ssv-node-14 - - name: ssv-node-14-cm - configMap: - name: ssv-node-14-cm + - name: ssv-node-14 + persistentVolumeClaim: + claimName: ssv-node-14 + - name: ssv-node-14-cm + configMap: + name: ssv-node-14-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-15-deployment.yml b/.k8/hetzner-stage/ssv-node-15-deployment.yml index ec59df9720..1e02ab3230 100644 --- a/.k8/hetzner-stage/ssv-node-15-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-15-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15015 protocol: TCP targetPort: 15015 - name: port-15015 + name: metrics - port: 16015 protocol: TCP targetPort: 16015 @@ -48,86 +48,86 @@ spec: app: ssv-node-15 spec: containers: - - name: ssv-node-15 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12015 - name: port-12015 - protocol: UDP - hostPort: 12015 - - containerPort: 13015 - name: port-13015 - hostPort: 13015 - - containerPort: 15015 - name: port-15015 - hostPort: 15015 - - containerPort: 16015 - name: port-16015 - hostPort: 16015 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15015" - - name: SSV_API_PORT - value: "16015" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-15 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-15-cm + - name: ssv-node-15 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12015 + name: port-12015 + protocol: UDP + hostPort: 12015 + - containerPort: 13015 + name: port-13015 + hostPort: 13015 + - containerPort: 15015 + name: port-15015 + hostPort: 15015 + - containerPort: 16015 + name: port-16015 + hostPort: 16015 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15015" + - name: SSV_API_PORT + value: "16015" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-15 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-15-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-15 - persistentVolumeClaim: - claimName: ssv-node-15 - - name: ssv-node-15-cm - configMap: - name: ssv-node-15-cm + - name: ssv-node-15 + persistentVolumeClaim: + claimName: ssv-node-15 + - name: ssv-node-15-cm + configMap: + name: ssv-node-15-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-16-deployment.yml b/.k8/hetzner-stage/ssv-node-16-deployment.yml index f25f60b70c..d4d8a3802a 100644 --- a/.k8/hetzner-stage/ssv-node-16-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-16-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15016 protocol: TCP targetPort: 15016 - name: port-15016 + name: metrics - port: 16016 protocol: TCP targetPort: 16016 @@ -48,86 +48,86 @@ spec: app: ssv-node-16 spec: containers: - - name: ssv-node-16 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12016 - name: port-12016 - protocol: UDP - hostPort: 12016 - - containerPort: 13016 - name: port-13016 - hostPort: 13016 - - containerPort: 15016 - name: port-15016 - hostPort: 15016 - - containerPort: 16016 - name: port-16016 - hostPort: 16016 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15016" - - name: SSV_API_PORT - value: "16016" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-16 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-16-cm + - name: ssv-node-16 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12016 + name: port-12016 + protocol: UDP + hostPort: 12016 + - containerPort: 13016 + name: port-13016 + hostPort: 13016 + - containerPort: 15016 + name: port-15016 + hostPort: 15016 + - containerPort: 16016 + name: port-16016 + hostPort: 16016 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15016" + - name: SSV_API_PORT + value: "16016" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-16 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-16-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-16 - persistentVolumeClaim: - claimName: ssv-node-16 - - name: ssv-node-16-cm - configMap: - name: ssv-node-16-cm + - name: ssv-node-16 + persistentVolumeClaim: + claimName: ssv-node-16 + - name: ssv-node-16-cm + configMap: + name: ssv-node-16-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-17-deployment.yml b/.k8/hetzner-stage/ssv-node-17-deployment.yml index 14561ef74c..f27cc45f04 100644 --- a/.k8/hetzner-stage/ssv-node-17-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-17-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15017 protocol: TCP targetPort: 15017 - name: port-15017 + name: metrics - port: 16017 protocol: TCP targetPort: 16017 @@ -48,86 +48,86 @@ spec: app: ssv-node-17 spec: containers: - - name: ssv-node-17 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12017 - name: port-12017 - protocol: UDP - hostPort: 12017 - - containerPort: 13017 - name: port-13017 - hostPort: 13017 - - containerPort: 15017 - name: port-15017 - hostPort: 15017 - - containerPort: 16017 - name: port-16017 - hostPort: 16017 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15017" - - name: SSV_API_PORT - value: "16017" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-17 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-17-cm + - name: ssv-node-17 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12017 + name: port-12017 + protocol: UDP + hostPort: 12017 + - containerPort: 13017 + name: port-13017 + hostPort: 13017 + - containerPort: 15017 + name: port-15017 + hostPort: 15017 + - containerPort: 16017 + name: port-16017 + hostPort: 16017 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15017" + - name: SSV_API_PORT + value: "16017" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-17 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-17-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-17 - persistentVolumeClaim: - claimName: ssv-node-17 - - name: ssv-node-17-cm - configMap: - name: ssv-node-17-cm + - name: ssv-node-17 + persistentVolumeClaim: + claimName: ssv-node-17 + - name: ssv-node-17-cm + configMap: + name: ssv-node-17-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-18-deployment.yml b/.k8/hetzner-stage/ssv-node-18-deployment.yml index 40ac470dd3..3df713b625 100644 --- a/.k8/hetzner-stage/ssv-node-18-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-18-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15018 protocol: TCP targetPort: 15018 - name: port-15018 + name: metrics - port: 16018 protocol: TCP targetPort: 16018 @@ -48,86 +48,86 @@ spec: app: ssv-node-18 spec: containers: - - name: ssv-node-18 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12018 - name: port-12018 - protocol: UDP - hostPort: 12018 - - containerPort: 13018 - name: port-13018 - hostPort: 13018 - - containerPort: 15018 - name: port-15018 - hostPort: 15018 - - containerPort: 16018 - name: port-16018 - hostPort: 16018 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15018" - - name: SSV_API_PORT - value: "16018" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-18 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-18-cm + - name: ssv-node-18 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12018 + name: port-12018 + protocol: UDP + hostPort: 12018 + - containerPort: 13018 + name: port-13018 + hostPort: 13018 + - containerPort: 15018 + name: port-15018 + hostPort: 15018 + - containerPort: 16018 + name: port-16018 + hostPort: 16018 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15018" + - name: SSV_API_PORT + value: "16018" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-18 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-18-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-18 - persistentVolumeClaim: - claimName: ssv-node-18 - - name: ssv-node-18-cm - configMap: - name: ssv-node-18-cm + - name: ssv-node-18 + persistentVolumeClaim: + claimName: ssv-node-18 + - name: ssv-node-18-cm + configMap: + name: ssv-node-18-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-19-deployment.yml b/.k8/hetzner-stage/ssv-node-19-deployment.yml index a266c88e48..878654b288 100644 --- a/.k8/hetzner-stage/ssv-node-19-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-19-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15019 protocol: TCP targetPort: 15019 - name: port-15019 + name: metrics - port: 16019 protocol: TCP targetPort: 16019 @@ -48,86 +48,86 @@ spec: app: ssv-node-19 spec: containers: - - name: ssv-node-19 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12019 - name: port-12019 - protocol: UDP - hostPort: 12019 - - containerPort: 13019 - name: port-13019 - hostPort: 13019 - - containerPort: 15019 - name: port-15019 - hostPort: 15019 - - containerPort: 16019 - name: port-16019 - hostPort: 16019 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15019" - - name: SSV_API_PORT - value: "16019" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-19 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-19-cm + - name: ssv-node-19 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12019 + name: port-12019 + protocol: UDP + hostPort: 12019 + - containerPort: 13019 + name: port-13019 + hostPort: 13019 + - containerPort: 15019 + name: port-15019 + hostPort: 15019 + - containerPort: 16019 + name: port-16019 + hostPort: 16019 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15019" + - name: SSV_API_PORT + value: "16019" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-19 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-19-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-19 - persistentVolumeClaim: - claimName: ssv-node-19 - - name: ssv-node-19-cm - configMap: - name: ssv-node-19-cm + - name: ssv-node-19 + persistentVolumeClaim: + claimName: ssv-node-19 + - name: ssv-node-19-cm + configMap: + name: ssv-node-19-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-2-deployment.yml b/.k8/hetzner-stage/ssv-node-2-deployment.yml index f98472bdf2..1d6ff55534 100644 --- a/.k8/hetzner-stage/ssv-node-2-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-2-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15002 protocol: TCP targetPort: 15002 - name: port-15002 + name: metrics - port: 16002 protocol: TCP targetPort: 16002 @@ -48,86 +48,86 @@ spec: app: ssv-node-2 spec: containers: - - name: ssv-node-2 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12002 - name: port-12002 - protocol: UDP - hostPort: 12002 - - containerPort: 13002 - name: port-13002 - hostPort: 13002 - - containerPort: 15002 - name: port-15002 - hostPort: 15002 - - containerPort: 16002 - name: port-16002 - hostPort: 16002 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15002" - - name: SSV_API_PORT - value: "16002" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-2 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-2-cm + - name: ssv-node-2 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12002 + name: port-12002 + protocol: UDP + hostPort: 12002 + - containerPort: 13002 + name: port-13002 + hostPort: 13002 + - containerPort: 15002 + name: port-15002 + hostPort: 15002 + - containerPort: 16002 + name: port-16002 + hostPort: 16002 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15002" + - name: SSV_API_PORT + value: "16002" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-2 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-2-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-2 - persistentVolumeClaim: - claimName: ssv-node-2 - - name: ssv-node-2-cm - configMap: - name: ssv-node-2-cm + - name: ssv-node-2 + persistentVolumeClaim: + claimName: ssv-node-2 + - name: ssv-node-2-cm + configMap: + name: ssv-node-2-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-20-deployment.yml b/.k8/hetzner-stage/ssv-node-20-deployment.yml index 2e4cc9792d..b3e800c134 100644 --- a/.k8/hetzner-stage/ssv-node-20-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-20-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15020 protocol: TCP targetPort: 15020 - name: port-15020 + name: metrics - port: 16020 protocol: TCP targetPort: 16020 @@ -48,86 +48,86 @@ spec: app: ssv-node-20 spec: containers: - - name: ssv-node-20 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12020 - name: port-12020 - protocol: UDP - hostPort: 12020 - - containerPort: 13020 - name: port-13020 - hostPort: 13020 - - containerPort: 15020 - name: port-15020 - hostPort: 15020 - - containerPort: 16020 - name: port-16020 - hostPort: 16020 - env: - - name: 
SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15020" - - name: SSV_API_PORT - value: "16020" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-20 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-20-cm + - name: ssv-node-20 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12020 + name: port-12020 + protocol: UDP + hostPort: 12020 + - containerPort: 13020 + name: port-13020 + hostPort: 13020 + - containerPort: 15020 + name: port-15020 + hostPort: 15020 + - containerPort: 16020 + name: port-16020 + hostPort: 16020 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15020" + - name: SSV_API_PORT + value: "16020" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-20 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-20-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-20 - persistentVolumeClaim: - claimName: ssv-node-20 - - name: ssv-node-20-cm - configMap: - name: ssv-node-20-cm + - name: ssv-node-20 + persistentVolumeClaim: + claimName: ssv-node-20 + - name: ssv-node-20-cm + configMap: + name: ssv-node-20-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-21-deployment.yml b/.k8/hetzner-stage/ssv-node-21-deployment.yml index 7e7a28c0fa..7983c59b02 100644 --- a/.k8/hetzner-stage/ssv-node-21-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-21-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15021 protocol: TCP targetPort: 15021 - name: port-15021 + name: metrics - port: 16021 protocol: TCP targetPort: 16021 @@ -48,86 +48,86 @@ spec: app: ssv-node-21 spec: containers: - - name: ssv-node-21 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12021 - name: port-12021 - protocol: UDP - hostPort: 12021 - - containerPort: 13021 - name: port-13021 - hostPort: 13021 - - containerPort: 15021 - name: port-15021 - hostPort: 15021 - - containerPort: 16021 - name: port-16021 - hostPort: 16021 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15021" - - name: SSV_API_PORT - value: "16021" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-21 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-21-cm + - name: ssv-node-21 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12021 + name: port-12021 + protocol: UDP + hostPort: 12021 + - containerPort: 13021 + name: port-13021 + hostPort: 13021 + - containerPort: 15021 + name: port-15021 + hostPort: 15021 + - containerPort: 16021 + name: port-16021 + hostPort: 16021 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15021" + - name: SSV_API_PORT + value: "16021" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-21 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-21-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-21 - persistentVolumeClaim: - claimName: ssv-node-21 - - name: ssv-node-21-cm - configMap: - name: ssv-node-21-cm + - name: ssv-node-21 + persistentVolumeClaim: + claimName: ssv-node-21 + - name: ssv-node-21-cm + configMap: + name: ssv-node-21-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-22-deployment.yml b/.k8/hetzner-stage/ssv-node-22-deployment.yml index 1459d26dc6..914c166ad5 100644 --- a/.k8/hetzner-stage/ssv-node-22-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-22-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15022 protocol: TCP targetPort: 15022 - name: port-15022 + name: metrics - port: 16022 protocol: TCP targetPort: 16022 @@ -48,86 +48,86 @@ spec: app: ssv-node-22 spec: containers: - - name: ssv-node-22 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12022 - name: port-12022 - protocol: UDP - hostPort: 12022 - - containerPort: 13022 - name: port-13022 - hostPort: 13022 - - containerPort: 15022 - name: port-15022 - hostPort: 15022 - - containerPort: 16022 - name: port-16022 - hostPort: 16022 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15022" - - name: SSV_API_PORT - value: "16022" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-22 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-22-cm + - name: ssv-node-22 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12022 + name: port-12022 + protocol: UDP + hostPort: 12022 + - containerPort: 13022 + name: port-13022 + hostPort: 13022 + - containerPort: 15022 + name: port-15022 + hostPort: 15022 + - containerPort: 16022 + name: port-16022 + hostPort: 16022 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15022" + - name: SSV_API_PORT + value: "16022" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-22 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-22-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-22 - persistentVolumeClaim: - claimName: ssv-node-22 - - name: ssv-node-22-cm - configMap: - name: ssv-node-22-cm + - name: ssv-node-22 + persistentVolumeClaim: + claimName: ssv-node-22 + - name: ssv-node-22-cm + configMap: + name: ssv-node-22-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-23-deployment.yml b/.k8/hetzner-stage/ssv-node-23-deployment.yml index a5eeac635c..f890622b3e 100644 --- a/.k8/hetzner-stage/ssv-node-23-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-23-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15023 protocol: TCP targetPort: 15023 - name: port-15023 + name: metrics - port: 16023 protocol: TCP targetPort: 16023 @@ -48,86 +48,86 @@ spec: app: ssv-node-23 spec: containers: - - name: ssv-node-23 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12023 - name: port-12023 - protocol: UDP - hostPort: 12023 - - containerPort: 13023 - name: port-13023 - hostPort: 13023 - - containerPort: 15023 - name: port-15023 - hostPort: 15023 - - containerPort: 16023 - name: port-16023 - hostPort: 16023 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15023" - - name: SSV_API_PORT - value: "16023" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-23 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-23-cm + - name: ssv-node-23 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12023 + name: port-12023 + protocol: UDP + hostPort: 12023 + - containerPort: 13023 + name: port-13023 + hostPort: 13023 + - containerPort: 15023 + name: port-15023 + hostPort: 15023 + - containerPort: 16023 + name: port-16023 + hostPort: 16023 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15023" + - name: SSV_API_PORT + value: "16023" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-23 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-23-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-23 - persistentVolumeClaim: - claimName: ssv-node-23 - - name: ssv-node-23-cm - configMap: - name: ssv-node-23-cm + - name: ssv-node-23 + persistentVolumeClaim: + claimName: ssv-node-23 + - name: ssv-node-23-cm + configMap: + name: ssv-node-23-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-24-deployment.yml b/.k8/hetzner-stage/ssv-node-24-deployment.yml index 5cb1e41b5f..30ca48aaad 100644 --- a/.k8/hetzner-stage/ssv-node-24-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-24-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15024 protocol: TCP targetPort: 15024 - name: port-15024 + name: metrics - port: 16024 protocol: TCP targetPort: 16024 @@ -48,86 +48,86 @@ spec: app: ssv-node-24 spec: containers: - - name: ssv-node-24 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12424 - name: port-12424 - protocol: UDP - hostPort: 12424 - - containerPort: 13024 - name: port-13024 - hostPort: 13024 - - containerPort: 15024 - name: port-15024 - hostPort: 15024 - - containerPort: 16024 - name: port-16024 - hostPort: 16024 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15024" - - name: SSV_API_PORT - value: "16024" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-24 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-24-cm + - name: ssv-node-24 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12424 + name: port-12424 + protocol: UDP + hostPort: 12424 + - containerPort: 13024 + name: port-13024 + hostPort: 13024 + - containerPort: 15024 + name: port-15024 + hostPort: 15024 + - containerPort: 16024 + name: port-16024 + hostPort: 16024 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15024" + - name: SSV_API_PORT + value: "16024" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-24 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-24-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-24 - persistentVolumeClaim: - claimName: ssv-node-24 - - name: ssv-node-24-cm - configMap: - name: ssv-node-24-cm + - name: ssv-node-24 + persistentVolumeClaim: + claimName: ssv-node-24 + - name: ssv-node-24-cm + configMap: + name: ssv-node-24-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-25-deployment.yml b/.k8/hetzner-stage/ssv-node-25-deployment.yml index ccd6e42cf2..21c2b30e36 100644 --- a/.k8/hetzner-stage/ssv-node-25-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-25-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15025 protocol: TCP targetPort: 15025 - name: port-15025 + name: metrics - port: 16025 protocol: TCP targetPort: 16025 @@ -48,86 +48,86 @@ spec: app: ssv-node-25 spec: containers: - - name: ssv-node-25 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12025 - name: port-12025 - protocol: UDP - hostPort: 12025 - - containerPort: 13025 - name: port-13025 - hostPort: 13025 - - containerPort: 15025 - name: port-15025 - hostPort: 15025 - - containerPort: 16025 - name: port-16025 - hostPort: 16025 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15025"
-        - name: SSV_API_PORT
-          value: "16025"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-25
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-25-cm
+        - name: ssv-node-25
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12025
+              name: port-12025
+              protocol: UDP
+              hostPort: 12025
+            - containerPort: 13025
+              name: port-13025
+              hostPort: 13025
+            - containerPort: 15025
+              name: port-15025
+              hostPort: 15025
+            - containerPort: 16025
+              name: port-16025
+              hostPort: 16025
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15025"
+            - name: SSV_API_PORT
+              value: "16025"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-25
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-25-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-25
-        persistentVolumeClaim:
-          claimName: ssv-node-25
-      - name: ssv-node-25-cm
-        configMap:
-          name: ssv-node-25-cm
+        - name: ssv-node-25
+          persistentVolumeClaim:
+            claimName: ssv-node-25
+        - name: ssv-node-25-cm
+          configMap:
+            name: ssv-node-25-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-26-deployment.yml b/.k8/hetzner-stage/ssv-node-26-deployment.yml
index 396e7360f1..75691eee89 100644
--- a/.k8/hetzner-stage/ssv-node-26-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-26-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15026
       protocol: TCP
       targetPort: 15026
-      name: port-15026
+      name: metrics
     - port: 16026
       protocol: TCP
       targetPort: 16026
@@ -48,86 +48,86 @@ spec:
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15026" - - name: SSV_API_PORT - value: "16026" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-26 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-26-cm + - name: ssv-node-26 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12026 + name: port-12026 + protocol: UDP + hostPort: 12026 + - containerPort: 13026 + name: port-13026 + hostPort: 13026 + - containerPort: 15026 + name: port-15026 + hostPort: 15026 + - containerPort: 16026 + name: port-16026 + hostPort: 16026 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15026" + - name: SSV_API_PORT + value: "16026" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-26 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-26-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-26 - persistentVolumeClaim: - claimName: ssv-node-26 - - name: ssv-node-26-cm - configMap: - name: ssv-node-26-cm + - name: ssv-node-26 + persistentVolumeClaim: + claimName: ssv-node-26 + - name: ssv-node-26-cm + configMap: + name: ssv-node-26-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-27-deployment.yml b/.k8/hetzner-stage/ssv-node-27-deployment.yml index 8674533272..9c0f90e5f3 100644 --- a/.k8/hetzner-stage/ssv-node-27-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-27-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15027 protocol: TCP targetPort: 15027 - name: port-15027 + name: metrics - port: 16027 protocol: TCP targetPort: 16027 @@ -48,86 +48,86 @@ spec: app: ssv-node-27 spec: containers: - - name: ssv-node-27 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12027 - name: port-12027 - protocol: UDP - hostPort: 12027 - - containerPort: 13027 - name: port-13027 - hostPort: 13027 - - containerPort: 15027 - name: port-15027 - hostPort: 15027 - - containerPort: 16027 - name: port-16027 - hostPort: 16027 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15027" - - name: SSV_API_PORT - value: "16027" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-27 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-27-cm + - name: ssv-node-27 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12027 + name: port-12027 + protocol: UDP + hostPort: 12027 + - containerPort: 13027 + name: port-13027 + hostPort: 13027 + - containerPort: 15027 + name: port-15027 + hostPort: 15027 + - containerPort: 16027 + name: port-16027 + hostPort: 16027 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15027" + - name: SSV_API_PORT + value: "16027" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-27 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-27-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-27 - persistentVolumeClaim: - claimName: ssv-node-27 - - name: ssv-node-27-cm - configMap: - name: ssv-node-27-cm + - name: ssv-node-27 + persistentVolumeClaim: + claimName: ssv-node-27 + - name: ssv-node-27-cm + configMap: + name: ssv-node-27-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-28-deployment.yml b/.k8/hetzner-stage/ssv-node-28-deployment.yml index 08712b773b..3b61f9e7e0 100644 --- a/.k8/hetzner-stage/ssv-node-28-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-28-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15028 protocol: TCP targetPort: 15028 - name: port-15028 + name: metrics - port: 16028 protocol: TCP targetPort: 16028 @@ -48,86 +48,86 @@ spec: app: ssv-node-28 spec: containers: - - name: ssv-node-28 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12028 - name: port-12028 - protocol: UDP - hostPort: 12028 - - containerPort: 13028 - name: port-13028 - hostPort: 13028 - - containerPort: 15028 - name: port-15028 - hostPort: 15028 - - containerPort: 16028 - name: port-16028 - hostPort: 16028 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15028" - - name: SSV_API_PORT - value: "16028" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-28 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-28-cm + - name: ssv-node-28 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12028 + name: port-12028 + protocol: UDP + hostPort: 12028 + - containerPort: 13028 + name: port-13028 + hostPort: 13028 + - containerPort: 15028 + name: port-15028 + hostPort: 15028 + - containerPort: 16028 + name: port-16028 + hostPort: 16028 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15028" + - name: SSV_API_PORT + value: "16028" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-28 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-28-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-28 - persistentVolumeClaim: - claimName: ssv-node-28 - - name: ssv-node-28-cm - configMap: - name: ssv-node-28-cm + - name: ssv-node-28 + persistentVolumeClaim: + claimName: ssv-node-28 + - name: ssv-node-28-cm + configMap: + name: ssv-node-28-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-29-deployment.yml b/.k8/hetzner-stage/ssv-node-29-deployment.yml index acb427576c..2b9a312a16 100644 --- a/.k8/hetzner-stage/ssv-node-29-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-29-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15029 protocol: TCP targetPort: 15029 - name: port-15029 + name: metrics - port: 16029 protocol: TCP targetPort: 16029 @@ -48,86 +48,86 @@ spec: app: ssv-node-29 spec: containers: - - name: ssv-node-29 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12029 - name: port-12029 - protocol: UDP - hostPort: 12029 - - containerPort: 13029 - name: port-13029 - hostPort: 13029 - - containerPort: 15029 - name: port-15029 - hostPort: 15029 - - containerPort: 16029 - name: port-16029 - hostPort: 16029 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15029" - - name: SSV_API_PORT - value: "16029" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-29 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-29-cm + - name: ssv-node-29 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12029 + name: port-12029 + protocol: UDP + hostPort: 12029 + - containerPort: 13029 + name: port-13029 + hostPort: 13029 + - containerPort: 15029 + name: port-15029 + hostPort: 15029 + - containerPort: 16029 + name: port-16029 + hostPort: 16029 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15029" + - name: SSV_API_PORT + value: "16029" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-29 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-29-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-29 - persistentVolumeClaim: - claimName: ssv-node-29 - - name: ssv-node-29-cm - configMap: - name: ssv-node-29-cm + - name: ssv-node-29 + persistentVolumeClaim: + claimName: ssv-node-29 + - name: ssv-node-29-cm + configMap: + name: ssv-node-29-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-3-deployment.yml b/.k8/hetzner-stage/ssv-node-3-deployment.yml index 8486b720d0..a001e63afd 100644 --- a/.k8/hetzner-stage/ssv-node-3-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-3-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15003 protocol: TCP targetPort: 15003 - name: port-15003 + name: metrics - port: 16003 protocol: TCP targetPort: 16003 @@ -48,86 +48,86 @@ spec: app: ssv-node-3 spec: containers: - - name: ssv-node-3 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12003 - name: port-12003 - protocol: UDP - hostPort: 12003 - - containerPort: 13003 - name: port-13003 - hostPort: 13003 - - containerPort: 15003 - name: port-15003 - hostPort: 15003 - - containerPort: 16003 - name: port-16003 - hostPort: 16003 - env: - - name: 
SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15003" - - name: SSV_API_PORT - value: "16003" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-3 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-3-cm + - name: ssv-node-3 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12003 + name: port-12003 + protocol: UDP + hostPort: 12003 + - containerPort: 13003 + name: port-13003 + hostPort: 13003 + - containerPort: 15003 + name: port-15003 + hostPort: 15003 + - containerPort: 16003 + name: port-16003 + hostPort: 16003 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15003" + - name: SSV_API_PORT + value: "16003" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-3 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-3-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-3 - persistentVolumeClaim: - claimName: ssv-node-3 - - name: ssv-node-3-cm - configMap: - name: ssv-node-3-cm + - name: ssv-node-3 + persistentVolumeClaim: + claimName: ssv-node-3 + - name: ssv-node-3-cm + configMap: + name: ssv-node-3-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-30-deployment.yml b/.k8/hetzner-stage/ssv-node-30-deployment.yml index 239bbc7302..1b5b7bd0db 100644 --- a/.k8/hetzner-stage/ssv-node-30-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-30-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15030 protocol: TCP targetPort: 15030 - name: port-15030 + name: metrics - port: 16030 protocol: TCP targetPort: 16030 @@ -48,86 +48,86 @@ spec: app: ssv-node-30 spec: containers: - - name: ssv-node-30 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12030 - name: port-12030 - protocol: UDP - hostPort: 12030 - - containerPort: 13030 - name: port-13030 - hostPort: 13030 - - containerPort: 15030 - name: port-15030 - hostPort: 15030 - - containerPort: 16030 - name: port-16030 - hostPort: 16030 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15030" - - name: SSV_API_PORT - value: "16030" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-30 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-30-cm + - name: ssv-node-30 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12030 + name: port-12030 + protocol: UDP + hostPort: 12030 + - containerPort: 13030 + name: port-13030 + hostPort: 13030 + - containerPort: 15030 + name: port-15030 + hostPort: 15030 + - containerPort: 16030 + name: port-16030 + hostPort: 16030 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15030" + - name: SSV_API_PORT + value: "16030" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-30 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-30-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-30 - persistentVolumeClaim: - claimName: ssv-node-30 - - name: ssv-node-30-cm - configMap: - name: ssv-node-30-cm + - name: ssv-node-30 + persistentVolumeClaim: + claimName: ssv-node-30 + - name: ssv-node-30-cm + configMap: + name: ssv-node-30-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-31-deployment.yml b/.k8/hetzner-stage/ssv-node-31-deployment.yml index af78e460ce..a0b439ec11 100644 --- a/.k8/hetzner-stage/ssv-node-31-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-31-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15031 protocol: TCP targetPort: 15031 - name: port-15031 + name: metrics - port: 16031 protocol: TCP targetPort: 16031 @@ -48,86 +48,86 @@ spec: app: ssv-node-31 spec: containers: - - name: ssv-node-31 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12031 - name: port-12031 - protocol: UDP - hostPort: 12031 - - containerPort: 13031 - name: port-13031 - hostPort: 13031 - - containerPort: 15031 - name: port-15031 - hostPort: 15031 - - containerPort: 16031 - name: port-16031 - hostPort: 16031 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15031" - - name: SSV_API_PORT - value: "16031" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-31 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-31-cm + - name: ssv-node-31 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12031 + name: port-12031 + protocol: UDP + hostPort: 12031 + - containerPort: 13031 + name: port-13031 + hostPort: 13031 + - containerPort: 15031 + name: port-15031 + hostPort: 15031 + - containerPort: 16031 + name: port-16031 + hostPort: 16031 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15031" + - name: SSV_API_PORT + value: "16031" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-31 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-31-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-31 - persistentVolumeClaim: - claimName: ssv-node-31 - - name: ssv-node-31-cm - configMap: - name: ssv-node-31-cm + - name: ssv-node-31 + persistentVolumeClaim: + claimName: ssv-node-31 + - name: ssv-node-31-cm + configMap: + name: ssv-node-31-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-32-deployment.yml b/.k8/hetzner-stage/ssv-node-32-deployment.yml index d6567ac81e..f7e7bd5d21 100644 --- a/.k8/hetzner-stage/ssv-node-32-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-32-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15032 protocol: TCP targetPort: 15032 - name: port-15032 + name: metrics - port: 16032 protocol: TCP targetPort: 16032 @@ -48,86 +48,86 @@ spec: app: ssv-node-32 spec: containers: - - name: ssv-node-32 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12032 - name: port-12032 - protocol: UDP - hostPort: 12032 - - containerPort: 13032 - name: port-13032 - hostPort: 13032 - - containerPort: 15032 - name: port-15032 - hostPort: 15032 - - containerPort: 16032 - name: port-16032 - hostPort: 16032 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15032" - - name: SSV_API_PORT - value: "16032" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-32 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-32-cm + - name: ssv-node-32 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12032 + name: port-12032 + protocol: UDP + hostPort: 12032 + - containerPort: 13032 + name: port-13032 + hostPort: 13032 + - containerPort: 15032 + name: port-15032 + hostPort: 15032 + - containerPort: 16032 + name: port-16032 + hostPort: 16032 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15032" + - name: SSV_API_PORT + value: "16032" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-32 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-32-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-32 - persistentVolumeClaim: - claimName: ssv-node-32 - - name: ssv-node-32-cm - configMap: - name: ssv-node-32-cm + - name: ssv-node-32 + persistentVolumeClaim: + claimName: ssv-node-32 + - name: ssv-node-32-cm + configMap: + name: ssv-node-32-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-33-deployment.yml b/.k8/hetzner-stage/ssv-node-33-deployment.yml index 6b72d090df..022fdea98e 100644 --- a/.k8/hetzner-stage/ssv-node-33-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-33-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15033 protocol: TCP targetPort: 15033 - name: port-15033 + name: metrics - port: 16033 protocol: TCP targetPort: 16033 @@ -48,86 +48,86 @@ spec: app: ssv-node-33 spec: containers: - - name: ssv-node-33 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12033 - name: port-12033 - protocol: UDP - hostPort: 12033 - - containerPort: 13033 - name: port-13033 - hostPort: 13033 - - containerPort: 15033 - name: port-15033 - hostPort: 15033 - - containerPort: 16033 - name: port-16033 - hostPort: 16033 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15033" - - name: SSV_API_PORT - value: "16033" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-33 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-33-cm + - name: ssv-node-33 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12033 + name: port-12033 + protocol: UDP + hostPort: 12033 + - containerPort: 13033 + name: port-13033 + hostPort: 13033 + - containerPort: 15033 + name: port-15033 + hostPort: 15033 + - containerPort: 16033 + name: port-16033 + hostPort: 16033 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15033" + - name: SSV_API_PORT + value: "16033" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-33 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-33-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-33 - persistentVolumeClaim: - claimName: ssv-node-33 - - name: ssv-node-33-cm - configMap: - name: ssv-node-33-cm + - name: ssv-node-33 + persistentVolumeClaim: + claimName: ssv-node-33 + - name: ssv-node-33-cm + configMap: + name: ssv-node-33-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-34-deployment.yml b/.k8/hetzner-stage/ssv-node-34-deployment.yml index 363b7b16d3..ea73c678b5 100644 --- a/.k8/hetzner-stage/ssv-node-34-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-34-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15034 protocol: TCP targetPort: 15034 - name: port-15034 + name: metrics - port: 16034 protocol: TCP targetPort: 16034 @@ -48,86 +48,86 @@ spec: app: ssv-node-34 spec: containers: - - name: ssv-node-34 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12034 - name: port-12034 - protocol: UDP - hostPort: 12034 - - containerPort: 13034 - name: port-13034 - hostPort: 13034 - - containerPort: 15034 - name: port-15034 - hostPort: 15034 - - containerPort: 16034 - name: port-16034 - hostPort: 16034 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15034" - - name: SSV_API_PORT - value: "16034" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-34 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-34-cm + - name: ssv-node-34 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12034 + name: port-12034 + protocol: UDP + hostPort: 12034 + - containerPort: 13034 + name: port-13034 + hostPort: 13034 + - containerPort: 15034 + name: port-15034 + hostPort: 15034 + - containerPort: 16034 + name: port-16034 + hostPort: 16034 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15034" + - name: SSV_API_PORT + value: "16034" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-34 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-34-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-34 - persistentVolumeClaim: - claimName: ssv-node-34 - - name: ssv-node-34-cm - configMap: - name: ssv-node-34-cm + - name: ssv-node-34 + persistentVolumeClaim: + claimName: ssv-node-34 + - name: ssv-node-34-cm + configMap: + name: ssv-node-34-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-35-deployment.yml b/.k8/hetzner-stage/ssv-node-35-deployment.yml index 0693b7da9d..934c13dad8 100644 --- a/.k8/hetzner-stage/ssv-node-35-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-35-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15035 protocol: TCP targetPort: 15035 - name: port-15035 + name: metrics - port: 16035 protocol: TCP targetPort: 16035 @@ -48,86 +48,86 @@ spec: app: ssv-node-35 spec: containers: - - name: ssv-node-35 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12035 - name: port-12035 - protocol: UDP - hostPort: 12035 - - containerPort: 13035 - name: port-13035 - hostPort: 13035 - - containerPort: 15035 - name: port-15035 - hostPort: 15035 - - containerPort: 16035 - name: port-16035 - hostPort: 16035 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15035" - - name: SSV_API_PORT - value: "16035" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-35 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-35-cm + - name: ssv-node-35 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12035 + name: port-12035 + protocol: UDP + hostPort: 12035 + - containerPort: 13035 + name: port-13035 + hostPort: 13035 + - containerPort: 15035 + name: port-15035 + hostPort: 15035 + - containerPort: 16035 + name: port-16035 + hostPort: 16035 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15035" + - name: SSV_API_PORT + value: "16035" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-35 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-35-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-35 - persistentVolumeClaim: - claimName: ssv-node-35 - - name: ssv-node-35-cm - configMap: - name: ssv-node-35-cm + - name: ssv-node-35 + persistentVolumeClaim: + claimName: ssv-node-35 + - name: ssv-node-35-cm + configMap: + name: ssv-node-35-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-36-deployment.yml b/.k8/hetzner-stage/ssv-node-36-deployment.yml index 65a1566a23..a0a16967a0 100644 --- a/.k8/hetzner-stage/ssv-node-36-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-36-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15036 protocol: TCP targetPort: 15036 - name: port-15036 + name: metrics - port: 16036 protocol: TCP targetPort: 16036 @@ -48,86 +48,86 @@ spec: app: ssv-node-36 spec: containers: - - name: ssv-node-36 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12036 - name: port-12036 - protocol: UDP - hostPort: 12036 - - containerPort: 13036 - name: port-13036 - hostPort: 13036 - - containerPort: 15036 - name: port-15036 - hostPort: 15036 - - containerPort: 16036 - name: port-16036 - hostPort: 16036 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15036" - - name: SSV_API_PORT - value: "16036" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-36 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-36-cm + - name: ssv-node-36 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12036 + name: port-12036 + protocol: UDP + hostPort: 12036 + - containerPort: 13036 + name: port-13036 + hostPort: 13036 + - containerPort: 15036 + name: port-15036 + hostPort: 15036 + - containerPort: 16036 + name: port-16036 + hostPort: 16036 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15036" + - name: SSV_API_PORT + value: "16036" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-36 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-36-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-36 - persistentVolumeClaim: - claimName: ssv-node-36 - - name: ssv-node-36-cm - configMap: - name: ssv-node-36-cm + - name: ssv-node-36 + persistentVolumeClaim: + claimName: ssv-node-36 + - name: ssv-node-36-cm + configMap: + name: ssv-node-36-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-37-deployment.yml b/.k8/hetzner-stage/ssv-node-37-deployment.yml index 3c312c1560..46ae2171ca 100644 --- a/.k8/hetzner-stage/ssv-node-37-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-37-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15037 protocol: TCP targetPort: 15037 - name: port-15037 + name: metrics - port: 16037 protocol: TCP targetPort: 16037 @@ -48,86 +48,86 @@ spec: app: ssv-node-37 spec: containers: - - name: ssv-node-37 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12037 - name: port-12037 - protocol: UDP - hostPort: 12037 - - containerPort: 13037 - name: port-13037 - hostPort: 13037 - - containerPort: 15037 - name: port-15037 - hostPort: 15037 - - containerPort: 16037 - name: port-16037 - hostPort: 16037 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15037" - - name: SSV_API_PORT - value: "16037" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-37 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-37-cm + - name: ssv-node-37 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12037 + name: port-12037 + protocol: UDP + hostPort: 12037 + - containerPort: 13037 + name: port-13037 + hostPort: 13037 + - containerPort: 15037 + name: port-15037 + hostPort: 15037 + - containerPort: 16037 + name: port-16037 + hostPort: 16037 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15037" + - name: SSV_API_PORT + value: "16037" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-37 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-37-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-37 - persistentVolumeClaim: - claimName: ssv-node-37 - - name: ssv-node-37-cm - configMap: - name: ssv-node-37-cm + - name: ssv-node-37 + persistentVolumeClaim: + claimName: ssv-node-37 + - name: ssv-node-37-cm + configMap: + name: ssv-node-37-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-38-deployment.yml b/.k8/hetzner-stage/ssv-node-38-deployment.yml index ba3e0dacb2..4d174185bc 100644 --- a/.k8/hetzner-stage/ssv-node-38-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-38-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15038 protocol: TCP targetPort: 15038 - name: port-15038 + name: metrics - port: 16038 protocol: TCP targetPort: 16038 @@ -48,86 +48,86 @@ spec: app: ssv-node-38 spec: containers: - - name: ssv-node-38 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12038 - name: port-12038 - protocol: UDP - hostPort: 12038 - - containerPort: 13038 - name: port-13038 - hostPort: 13038 - - containerPort: 15038 - name: port-15038 - hostPort: 15038 - - containerPort: 16038 - name: port-16038 - hostPort: 16038 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15038" - - name: SSV_API_PORT - value: "16038" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-38 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-38-cm + - name: ssv-node-38 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12038 + name: port-12038 + protocol: UDP + hostPort: 12038 + - containerPort: 13038 + name: port-13038 + hostPort: 13038 + - containerPort: 15038 + name: port-15038 + hostPort: 15038 + - containerPort: 16038 + name: port-16038 + hostPort: 16038 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15038" + - name: SSV_API_PORT + value: "16038" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-38 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-38-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-38 - persistentVolumeClaim: - claimName: ssv-node-38 - - name: ssv-node-38-cm - configMap: - name: ssv-node-38-cm + - name: ssv-node-38 + persistentVolumeClaim: + claimName: ssv-node-38 + - name: ssv-node-38-cm + configMap: + name: ssv-node-38-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-39-deployment.yml b/.k8/hetzner-stage/ssv-node-39-deployment.yml index cef15eed57..a32b658d36 100644 --- a/.k8/hetzner-stage/ssv-node-39-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-39-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15039 protocol: TCP targetPort: 15039 - name: port-15039 + name: metrics - port: 16039 protocol: TCP targetPort: 16039 @@ -48,86 +48,86 @@ spec: app: ssv-node-39 spec: containers: - - name: ssv-node-39 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12039 - name: port-12039 - protocol: UDP - hostPort: 12039 - - containerPort: 13039 - name: port-13039 - hostPort: 13039 - - containerPort: 15039 - name: port-15039 - hostPort: 15039 - - containerPort: 16039 - name: port-16039 - hostPort: 16039 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15039" - - name: SSV_API_PORT - value: "16039" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-39 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-39-cm + - name: ssv-node-39 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12039 + name: port-12039 + protocol: UDP + hostPort: 12039 + - containerPort: 13039 + name: port-13039 + hostPort: 13039 + - containerPort: 15039 + name: port-15039 + hostPort: 15039 + - containerPort: 16039 + name: port-16039 + hostPort: 16039 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
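+            # Sketch only (commented out, not part of this deployment's env):
+            # this patch's log-rotation options could be overridden per node
+            # via the env vars below; the values shown are placeholders.
+            # - name: LOG_FILE_SIZE
+            #   value: "500"
+            # - name: LOG_FILE_BACKUPS
+            #   value: "3"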
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15039" + - name: SSV_API_PORT + value: "16039" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-39 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-39-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-39 - persistentVolumeClaim: - claimName: ssv-node-39 - - name: ssv-node-39-cm - configMap: - name: ssv-node-39-cm + - name: ssv-node-39 + persistentVolumeClaim: + claimName: ssv-node-39 + - name: ssv-node-39-cm + configMap: + name: ssv-node-39-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-4-deployment.yml b/.k8/hetzner-stage/ssv-node-4-deployment.yml index 758473cb70..9e86515560 100644 --- a/.k8/hetzner-stage/ssv-node-4-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-4-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15004 protocol: TCP targetPort: 15004 - name: port-15004 + name: metrics - port: 16004 protocol: TCP targetPort: 16004 @@ -48,86 +48,86 @@ spec: app: ssv-node-4 spec: containers: - - name: ssv-node-4 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12004 - name: port-12004 - protocol: UDP - hostPort: 12004 - - containerPort: 13004 - name: port-13004 - hostPort: 13004 - - containerPort: 15004 - name: port-15004 - hostPort: 15004 - - containerPort: 16004 - name: port-16004 - hostPort: 16004 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15004" - - name: SSV_API_PORT - value: "16004" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-4 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-4-cm + - name: ssv-node-4 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12004 + name: port-12004 + protocol: UDP + hostPort: 12004 + - containerPort: 13004 + name: port-13004 + hostPort: 13004 + - containerPort: 15004 + name: port-15004 + hostPort: 15004 + - containerPort: 16004 + name: port-16004 + hostPort: 16004 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15004" + - name: SSV_API_PORT + value: "16004" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-4 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-4-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-4 - persistentVolumeClaim: - claimName: ssv-node-4 - - name: ssv-node-4-cm - configMap: - name: ssv-node-4-cm + - name: ssv-node-4 + persistentVolumeClaim: + claimName: ssv-node-4 + - name: ssv-node-4-cm + configMap: + name: ssv-node-4-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-40-deployment.yml b/.k8/hetzner-stage/ssv-node-40-deployment.yml index 022eded9fd..2510a5e0f4 100644 --- a/.k8/hetzner-stage/ssv-node-40-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-40-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15040 protocol: TCP targetPort: 15040 - name: port-15040 + name: metrics - port: 16040 protocol: TCP targetPort: 16040 @@ -48,86 +48,86 @@ spec: app: ssv-node-40 spec: containers: - - name: ssv-node-40 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12040 - name: port-12040 - protocol: UDP - hostPort: 12040 - - containerPort: 13040 - name: port-13040 - hostPort: 13040 - - containerPort: 15040 - name: port-15040 - hostPort: 15040 - - containerPort: 16040 - name: port-16040 - hostPort: 16040 - env: - - name: 
SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15040" - - name: SSV_API_PORT - value: "16040" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-40 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-40-cm + - name: ssv-node-40 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12040 + name: port-12040 + protocol: UDP + hostPort: 12040 + - containerPort: 13040 + name: port-13040 + hostPort: 13040 + - containerPort: 15040 + name: port-15040 + hostPort: 15040 + - containerPort: 16040 + name: port-16040 + hostPort: 16040 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
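+            # Note (comment only): METRICS_API_PORT below matches the Service
+            # port that this file now names "metrics", presumably so that
+            # monitoring can target the port by name rather than by number.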
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15040" + - name: SSV_API_PORT + value: "16040" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-40 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-40-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-40 - persistentVolumeClaim: - claimName: ssv-node-40 - - name: ssv-node-40-cm - configMap: - name: ssv-node-40-cm + - name: ssv-node-40 + persistentVolumeClaim: + claimName: ssv-node-40 + - name: ssv-node-40-cm + configMap: + name: ssv-node-40-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-41-deployment.yml b/.k8/hetzner-stage/ssv-node-41-deployment.yml index b2fc6fcad1..b7af959dce 100644 --- a/.k8/hetzner-stage/ssv-node-41-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-41-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15041 protocol: TCP targetPort: 15041 - name: port-15041 + name: metrics - port: 16041 protocol: TCP targetPort: 16041 @@ -48,86 +48,86 @@ spec: app: ssv-node-41 spec: containers: - - name: ssv-node-41 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12041 - name: port-12041 - protocol: UDP - hostPort: 12041 - - containerPort: 13041 - name: port-13041 - hostPort: 13041 - - containerPort: 15041 - name: port-15041 - hostPort: 15041 - - containerPort: 16041 - name: port-16041 - hostPort: 16041 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15041" - - name: SSV_API_PORT - value: "16041" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-41 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-41-cm + - name: ssv-node-41 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12041 + name: port-12041 + protocol: UDP + hostPort: 12041 + - containerPort: 13041 + name: port-13041 + hostPort: 13041 + - containerPort: 15041 + name: port-15041 + hostPort: 15041 + - containerPort: 16041 + name: port-16041 + hostPort: 16041 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15041" + - name: SSV_API_PORT + value: "16041" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-41 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-41-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-41 - persistentVolumeClaim: - claimName: ssv-node-41 - - name: ssv-node-41-cm - configMap: - name: ssv-node-41-cm + - name: ssv-node-41 + persistentVolumeClaim: + claimName: ssv-node-41 + - name: ssv-node-41-cm + configMap: + name: ssv-node-41-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-42-deployment.yml b/.k8/hetzner-stage/ssv-node-42-deployment.yml index 3664aeca45..763e462ebd 100644 --- a/.k8/hetzner-stage/ssv-node-42-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-42-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15042 protocol: TCP targetPort: 15042 - name: port-15042 + name: metrics - port: 16042 protocol: TCP targetPort: 16042 @@ -48,86 +48,86 @@ spec: app: ssv-node-42 spec: containers: - - name: ssv-node-42 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12042 - name: port-12042 - protocol: UDP - hostPort: 12042 - - containerPort: 13042 - name: port-13042 - hostPort: 13042 - - containerPort: 15042 - name: port-15042 - hostPort: 15042 - - containerPort: 16042 - name: port-16042 - hostPort: 16042 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15042" - - name: SSV_API_PORT - value: "16042" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-42 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-42-cm + - name: ssv-node-42 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12042 + name: port-12042 + protocol: UDP + hostPort: 12042 + - containerPort: 13042 + name: port-13042 + hostPort: 13042 + - containerPort: 15042 + name: port-15042 + hostPort: 15042 + - containerPort: 16042 + name: port-16042 + hostPort: 16042 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15042" + - name: SSV_API_PORT + value: "16042" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-42 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-42-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-42 - persistentVolumeClaim: - claimName: ssv-node-42 - - name: ssv-node-42-cm - configMap: - name: ssv-node-42-cm + - name: ssv-node-42 + persistentVolumeClaim: + claimName: ssv-node-42 + - name: ssv-node-42-cm + configMap: + name: ssv-node-42-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-43-deployment.yml b/.k8/hetzner-stage/ssv-node-43-deployment.yml index a9cd4f9b95..78ef2ed8a9 100644 --- a/.k8/hetzner-stage/ssv-node-43-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-43-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15043 protocol: TCP targetPort: 15043 - name: port-15043 + name: metrics - port: 16043 protocol: TCP targetPort: 16043 @@ -48,86 +48,86 @@ spec: app: ssv-node-43 spec: containers: - - name: ssv-node-43 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12043 - name: port-12043 - protocol: UDP - hostPort: 12043 - - containerPort: 13043 - name: port-13043 - hostPort: 13043 - - containerPort: 15043 - name: port-15043 - hostPort: 15043 - - containerPort: 16043 - name: port-16043 - hostPort: 16043 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15043" - - name: SSV_API_PORT - value: "16043" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-43 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-43-cm + - name: ssv-node-43 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12043 + name: port-12043 + protocol: UDP + hostPort: 12043 + - containerPort: 13043 + name: port-13043 + hostPort: 13043 + - containerPort: 15043 + name: port-15043 + hostPort: 15043 + - containerPort: 16043 + name: port-16043 + hostPort: 16043 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15043" + - name: SSV_API_PORT + value: "16043" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-43 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-43-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-43 - persistentVolumeClaim: - claimName: ssv-node-43 - - name: ssv-node-43-cm - configMap: - name: ssv-node-43-cm + - name: ssv-node-43 + persistentVolumeClaim: + claimName: ssv-node-43 + - name: ssv-node-43-cm + configMap: + name: ssv-node-43-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-44-deployment.yml b/.k8/hetzner-stage/ssv-node-44-deployment.yml index 01d0e22a17..2e7a8fb096 100644 --- a/.k8/hetzner-stage/ssv-node-44-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-44-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15044 protocol: TCP targetPort: 15044 - name: port-15044 + name: metrics - port: 16044 protocol: TCP targetPort: 16044 @@ -48,86 +48,86 @@ spec: app: ssv-node-44 spec: containers: - - name: ssv-node-44 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12044 - name: port-12044 - protocol: UDP - hostPort: 12044 - - containerPort: 13044 - name: port-13044 - hostPort: 13044 - - containerPort: 15044 - name: port-15044 - hostPort: 15044 - - containerPort: 16044 - name: port-16044 - hostPort: 16044 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15044" - - name: SSV_API_PORT - value: "16044" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-44 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-44-cm + - name: ssv-node-44 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12044 + name: port-12044 + protocol: UDP + hostPort: 12044 + - containerPort: 13044 + name: port-13044 + hostPort: 13044 + - containerPort: 15044 + name: port-15044 + hostPort: 15044 + - containerPort: 16044 + name: port-16044 + hostPort: 16044 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
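+            # Illustrative assumption (not set by this manifest): pointing
+            # LOG_FILE_PATH at the /data mount below would keep rotated log
+            # files on the persistent volume across pod restarts.
+            # - name: LOG_FILE_PATH
+            #   value: "./data/debug.log"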
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15044" + - name: SSV_API_PORT + value: "16044" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-44 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-44-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-44 - persistentVolumeClaim: - claimName: ssv-node-44 - - name: ssv-node-44-cm - configMap: - name: ssv-node-44-cm + - name: ssv-node-44 + persistentVolumeClaim: + claimName: ssv-node-44 + - name: ssv-node-44-cm + configMap: + name: ssv-node-44-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-45-deployment.yml b/.k8/hetzner-stage/ssv-node-45-deployment.yml index 81c4760282..89a3dcf3d2 100644 --- a/.k8/hetzner-stage/ssv-node-45-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-45-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15045 protocol: TCP targetPort: 15045 - name: port-15045 + name: metrics - port: 16045 protocol: TCP targetPort: 16045 @@ -48,86 +48,86 @@ spec: app: ssv-node-45 spec: containers: - - name: ssv-node-45 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12045 - name: port-12045 - protocol: UDP - hostPort: 12045 - - containerPort: 13045 - name: port-13045 - hostPort: 13045 - - containerPort: 15045 - name: port-15045 - hostPort: 15045 - - containerPort: 16045 - name: port-16045 - hostPort: 16045 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15045" - - name: SSV_API_PORT - value: "16045" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-45 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-45-cm + - name: ssv-node-45 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12045 + name: port-12045 + protocol: UDP + hostPort: 12045 + - containerPort: 13045 + name: port-13045 + hostPort: 13045 + - containerPort: 15045 + name: port-15045 + hostPort: 15045 + - containerPort: 16045 + name: port-16045 + hostPort: 16045 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15045" + - name: SSV_API_PORT + value: "16045" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-45 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-45-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-45 - persistentVolumeClaim: - claimName: ssv-node-45 - - name: ssv-node-45-cm - configMap: - name: ssv-node-45-cm + - name: ssv-node-45 + persistentVolumeClaim: + claimName: ssv-node-45 + - name: ssv-node-45-cm + configMap: + name: ssv-node-45-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-46-deployment.yml b/.k8/hetzner-stage/ssv-node-46-deployment.yml index 57526b672c..57fb291871 100644 --- a/.k8/hetzner-stage/ssv-node-46-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-46-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15046 protocol: TCP targetPort: 15046 - name: port-15046 + name: metrics - port: 16046 protocol: TCP targetPort: 16046 @@ -48,86 +48,86 @@ spec: app: ssv-node-46 spec: containers: - - name: ssv-node-46 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12046 - name: port-12046 - protocol: UDP - hostPort: 12046 - - containerPort: 13046 - name: port-13046 - hostPort: 13046 - - containerPort: 15046 - name: port-15046 - hostPort: 15046 - - containerPort: 16046 - name: port-16046 - hostPort: 16046 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15046" - - name: SSV_API_PORT - value: "16046" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-46 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-46-cm + - name: ssv-node-46 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12046 + name: port-12046 + protocol: UDP + hostPort: 12046 + - containerPort: 13046 + name: port-13046 + hostPort: 13046 + - containerPort: 15046 + name: port-15046 + hostPort: 15046 + - containerPort: 16046 + name: port-16046 + hostPort: 16046 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15046" + - name: SSV_API_PORT + value: "16046" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-46 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-46-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-46 - persistentVolumeClaim: - claimName: ssv-node-46 - - name: ssv-node-46-cm - configMap: - name: ssv-node-46-cm + - name: ssv-node-46 + persistentVolumeClaim: + claimName: ssv-node-46 + - name: ssv-node-46-cm + configMap: + name: ssv-node-46-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-47-deployment.yml b/.k8/hetzner-stage/ssv-node-47-deployment.yml index 8d832b2158..78cdb5aa62 100644 --- a/.k8/hetzner-stage/ssv-node-47-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-47-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15047 protocol: TCP targetPort: 15047 - name: port-15047 + name: metrics - port: 16047 protocol: TCP targetPort: 16047 @@ -48,86 +48,86 @@ spec: app: ssv-node-47 spec: containers: - - name: ssv-node-47 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12047 - name: port-12047 - protocol: UDP - hostPort: 12047 - - containerPort: 13047 - name: port-13047 - hostPort: 13047 - - containerPort: 15047 - name: port-15047 - hostPort: 15047 - - containerPort: 16047 - name: port-16047 - hostPort: 16047 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15047" - - name: SSV_API_PORT - value: "16047" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-47 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-47-cm + - name: ssv-node-47 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12047 + name: port-12047 + protocol: UDP + hostPort: 12047 + - containerPort: 13047 + name: port-13047 + hostPort: 13047 + - containerPort: 15047 + name: port-15047 + hostPort: 15047 + - containerPort: 16047 + name: port-16047 + hostPort: 16047 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15047" + - name: SSV_API_PORT + value: "16047" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-47 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-47-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-47 - persistentVolumeClaim: - claimName: ssv-node-47 - - name: ssv-node-47-cm - configMap: - name: ssv-node-47-cm + - name: ssv-node-47 + persistentVolumeClaim: + claimName: ssv-node-47 + - name: ssv-node-47-cm + configMap: + name: ssv-node-47-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-48-deployment.yml b/.k8/hetzner-stage/ssv-node-48-deployment.yml index 3c6fcbc533..72a53ac460 100644 --- a/.k8/hetzner-stage/ssv-node-48-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-48-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15048 protocol: TCP targetPort: 15048 - name: port-15048 + name: metrics - port: 16048 protocol: TCP targetPort: 16048 @@ -48,86 +48,86 @@ spec: app: ssv-node-48 spec: containers: - - name: ssv-node-48 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12048 - name: port-12048 - protocol: UDP - hostPort: 12048 - - containerPort: 13048 - name: port-13048 - hostPort: 13048 - - containerPort: 15048 - name: port-15048 - hostPort: 15048 - - containerPort: 16048 - name: port-16048 - hostPort: 16048 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15048" - - name: SSV_API_PORT - value: "16048" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-48 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-48-cm + - name: ssv-node-48 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12048 + name: port-12048 + protocol: UDP + hostPort: 12048 + - containerPort: 13048 + name: port-13048 + hostPort: 13048 + - containerPort: 15048 + name: port-15048 + hostPort: 15048 + - containerPort: 16048 + name: port-16048 + hostPort: 16048 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15048" + - name: SSV_API_PORT + value: "16048" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-48 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-48-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-48 - persistentVolumeClaim: - claimName: ssv-node-48 - - name: ssv-node-48-cm - configMap: - name: ssv-node-48-cm + - name: ssv-node-48 + persistentVolumeClaim: + claimName: ssv-node-48 + - name: ssv-node-48-cm + configMap: + name: ssv-node-48-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-49-deployment.yml b/.k8/hetzner-stage/ssv-node-49-deployment.yml index 16c168c0c0..a101e8004f 100644 --- a/.k8/hetzner-stage/ssv-node-49-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-49-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15049 protocol: TCP targetPort: 15049 - name: port-15049 + name: metrics - port: 16049 protocol: TCP targetPort: 16049 @@ -48,86 +48,86 @@ spec: app: ssv-node-49 spec: containers: - - name: ssv-node-49 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12049 - name: port-12049 - protocol: UDP - hostPort: 12049 - - containerPort: 13049 - name: port-13049 - hostPort: 13049 - - containerPort: 15049 - name: port-15049 - hostPort: 15049 - - containerPort: 16049 - name: port-16049 - hostPort: 16049 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15049" - - name: SSV_API_PORT - value: "16049" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-49 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-49-cm + - name: ssv-node-49 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12049 + name: port-12049 + protocol: UDP + hostPort: 12049 + - containerPort: 13049 + name: port-13049 + hostPort: 13049 + - containerPort: 15049 + name: port-15049 + hostPort: 15049 + - containerPort: 16049 + name: port-16049 + hostPort: 16049 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15049" + - name: SSV_API_PORT + value: "16049" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-49 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-49-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-49 - persistentVolumeClaim: - claimName: ssv-node-49 - - name: ssv-node-49-cm - configMap: - name: ssv-node-49-cm + - name: ssv-node-49 + persistentVolumeClaim: + claimName: ssv-node-49 + - name: ssv-node-49-cm + configMap: + name: ssv-node-49-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-5-deployment.yml b/.k8/hetzner-stage/ssv-node-5-deployment.yml index 0c4f294174..0079f9643c 100644 --- a/.k8/hetzner-stage/ssv-node-5-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-5-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15005 protocol: TCP targetPort: 15005 - name: port-15005 + name: metrics - port: 16005 protocol: TCP targetPort: 16005 @@ -48,86 +48,86 @@ spec: app: ssv-node-5 spec: containers: - - name: ssv-node-5 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12005 - name: port-12005 - protocol: UDP - hostPort: 12005 - - containerPort: 13005 - name: port-13005 - hostPort: 13005 - - containerPort: 15005 - name: port-15005 - hostPort: 15005 - - containerPort: 16005 - name: port-16005 - hostPort: 16005 - env: - - name: 
SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15005" - - name: SSV_API_PORT - value: "16005" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-5 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-5-cm + - name: ssv-node-5 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12005 + name: port-12005 + protocol: UDP + hostPort: 12005 + - containerPort: 13005 + name: port-13005 + hostPort: 13005 + - containerPort: 15005 + name: port-15005 + hostPort: 15005 + - containerPort: 16005 + name: port-16005 + hostPort: 16005 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15005" + - name: SSV_API_PORT + value: "16005" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-5 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-5-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-5 - persistentVolumeClaim: - claimName: ssv-node-5 - - name: ssv-node-5-cm - configMap: - name: ssv-node-5-cm + - name: ssv-node-5 + persistentVolumeClaim: + claimName: ssv-node-5 + - name: ssv-node-5-cm + configMap: + name: ssv-node-5-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-50-deployment.yml b/.k8/hetzner-stage/ssv-node-50-deployment.yml index 237964637e..afceccc175 100644 --- a/.k8/hetzner-stage/ssv-node-50-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-50-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15050 protocol: TCP targetPort: 15050 - name: port-15050 + name: metrics - port: 16050 protocol: TCP targetPort: 16050 @@ -48,86 +48,86 @@ spec: app: ssv-node-50 spec: containers: - - name: ssv-node-50 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12050 - name: port-12050 - protocol: UDP - hostPort: 12050 - - containerPort: 13050 - name: port-13050 - hostPort: 13050 - - containerPort: 15050 - name: port-15050 - hostPort: 15050 - - containerPort: 16050 - name: port-16050 - hostPort: 16050 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15050" - - name: SSV_API_PORT - value: "16050" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-50 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-50-cm + - name: ssv-node-50 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12050 + name: port-12050 + protocol: UDP + hostPort: 12050 + - containerPort: 13050 + name: port-13050 + hostPort: 13050 + - containerPort: 15050 + name: port-15050 + hostPort: 15050 + - containerPort: 16050 + name: port-16050 + hostPort: 16050 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15050" + - name: SSV_API_PORT + value: "16050" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-50 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-50-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-50 - persistentVolumeClaim: - claimName: ssv-node-50 - - name: ssv-node-50-cm - configMap: - name: ssv-node-50-cm + - name: ssv-node-50 + persistentVolumeClaim: + claimName: ssv-node-50 + - name: ssv-node-50-cm + configMap: + name: ssv-node-50-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-51-deployment.yml b/.k8/hetzner-stage/ssv-node-51-deployment.yml index 028ac33bde..3bb3f2f98d 100644 --- a/.k8/hetzner-stage/ssv-node-51-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-51-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15051 protocol: TCP targetPort: 15051 - name: port-15051 + name: metrics - port: 16051 protocol: TCP targetPort: 16051 @@ -48,86 +48,86 @@ spec: app: ssv-node-51 spec: containers: - - name: ssv-node-51 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12051 - name: port-12051 - protocol: UDP - hostPort: 12051 - - containerPort: 13051 - name: port-13051 - hostPort: 13051 - - containerPort: 15051 - name: port-15051 - hostPort: 15051 - - containerPort: 16051 - name: port-16051 - hostPort: 16051 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15051" - - name: SSV_API_PORT - value: "16051" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-51 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-51-cm + - name: ssv-node-51 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12051 + name: port-12051 + protocol: UDP + hostPort: 12051 + - containerPort: 13051 + name: port-13051 + hostPort: 13051 + - containerPort: 15051 + name: port-15051 + hostPort: 15051 + - containerPort: 16051 + name: port-16051 + hostPort: 16051 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15051" + - name: SSV_API_PORT + value: "16051" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-51 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-51-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-51 - persistentVolumeClaim: - claimName: ssv-node-51 - - name: ssv-node-51-cm - configMap: - name: ssv-node-51-cm + - name: ssv-node-51 + persistentVolumeClaim: + claimName: ssv-node-51 + - name: ssv-node-51-cm + configMap: + name: ssv-node-51-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-52-deployment.yml b/.k8/hetzner-stage/ssv-node-52-deployment.yml index 9f2eb3d888..e926abf433 100644 --- a/.k8/hetzner-stage/ssv-node-52-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-52-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15052 protocol: TCP targetPort: 15052 - name: port-15052 + name: metrics - port: 16052 protocol: TCP targetPort: 16052 @@ -48,86 +48,86 @@ spec: app: ssv-node-52 spec: containers: - - name: ssv-node-52 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12052 - name: port-12052 - protocol: UDP - hostPort: 12052 - - containerPort: 13052 - name: port-13052 - hostPort: 13052 - - containerPort: 15052 - name: port-15052 - hostPort: 15052 - - containerPort: 16052 - name: port-16052 - hostPort: 16052 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15052"
+            - name: SSV_API_PORT
+              value: "16052"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-52
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-52-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-52
-        persistentVolumeClaim:
-          claimName: ssv-node-52
-      - name: ssv-node-52-cm
-        configMap:
-          name: ssv-node-52-cm
+        - name: ssv-node-52
+          persistentVolumeClaim:
+            claimName: ssv-node-52
+        - name: ssv-node-52-cm
+          configMap:
+            name: ssv-node-52-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-53-deployment.yml b/.k8/hetzner-stage/ssv-node-53-deployment.yml
index 68515c515b..427b1229e0 100644
--- a/.k8/hetzner-stage/ssv-node-53-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-53-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15053
       protocol: TCP
       targetPort: 15053
-      name: port-15053
+      name: metrics
     - port: 16053
       protocol: TCP
       targetPort: 16053
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-53
     spec:
      containers:
-      - name: ssv-node-53
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12053
-          name: port-12053
-          protocol: UDP
-          hostPort: 12053
-        - containerPort: 13053
-          name: port-13053
-          hostPort: 13053
-        - containerPort: 15053
-          name: port-15053
-          hostPort: 15053
-        - containerPort: 16053
-          name: port-16053
-          hostPort: 16053
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15053"
-        - name: SSV_API_PORT
-          value: "16053"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-53
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-53-cm
+        - name: ssv-node-53
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12053
+              name: port-12053
+              protocol: UDP
+              hostPort: 12053
+            - containerPort: 13053
+              name: port-13053
+              hostPort: 13053
+            - containerPort: 15053
+              name: port-15053
+              hostPort: 15053
+            - containerPort: 16053
+              name: port-16053
+              hostPort: 16053
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15053"
+            - name: SSV_API_PORT
+              value: "16053"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-53
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-53-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-53
-        persistentVolumeClaim:
-          claimName: ssv-node-53
-      - name: ssv-node-53-cm
-        configMap:
-          name: ssv-node-53-cm
+        - name: ssv-node-53
+          persistentVolumeClaim:
+            claimName: ssv-node-53
+        - name: ssv-node-53-cm
+          configMap:
+            name: ssv-node-53-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-54-deployment.yml b/.k8/hetzner-stage/ssv-node-54-deployment.yml
index 9eb12dd56b..7023eb722b 100644
--- a/.k8/hetzner-stage/ssv-node-54-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-54-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15054
       protocol: TCP
       targetPort: 15054
-      name: port-15054
+      name: metrics
     - port: 16054
       protocol: TCP
       targetPort: 16054
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-54
     spec:
       containers:
-      - name: ssv-node-54
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12054
-          name: port-12054
-          protocol: UDP
-          hostPort: 12054
-        - containerPort: 13054
-          name: port-13054
-          hostPort: 13054
-        - containerPort: 15054
-          name: port-15054
-          hostPort: 15054
-        - containerPort: 16054
-          name: port-16054
-          hostPort: 16054
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15054"
-        - name: SSV_API_PORT
-          value: "16054"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-54
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-54-cm
+        - name: ssv-node-54
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12054
+              name: port-12054
+              protocol: UDP
+              hostPort: 12054
+            - containerPort: 13054
+              name: port-13054
+              hostPort: 13054
+            - containerPort: 15054
+              name: port-15054
+              hostPort: 15054
+            - containerPort: 16054
+              name: port-16054
+              hostPort: 16054
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15054"
+            - name: SSV_API_PORT
+              value: "16054"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-54
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-54-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-54
-        persistentVolumeClaim:
-          claimName: ssv-node-54
-      - name: ssv-node-54-cm
-        configMap:
-          name: ssv-node-54-cm
+        - name: ssv-node-54
+          persistentVolumeClaim:
+            claimName: ssv-node-54
+        - name: ssv-node-54-cm
+          configMap:
+            name: ssv-node-54-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-55-deployment.yml b/.k8/hetzner-stage/ssv-node-55-deployment.yml
index 05a109197b..38c980334a 100644
--- a/.k8/hetzner-stage/ssv-node-55-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-55-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15055
       protocol: TCP
       targetPort: 15055
-      name: port-15055
+      name: metrics
     - port: 16055
       protocol: TCP
       targetPort: 16055
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-55
     spec:
       containers:
-      - name: ssv-node-55
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12055
-          name: port-12055
-          protocol: UDP
-          hostPort: 12055
-        - containerPort: 13055
-          name: port-13055
-          hostPort: 13055
-        - containerPort: 15055
-          name: port-15055
-          hostPort: 15055
-        - containerPort: 16055
-          name: port-16055
-          hostPort: 16055
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15055"
-        - name: SSV_API_PORT
-          value: "16055"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-55
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-55-cm
+        - name: ssv-node-55
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12055
+              name: port-12055
+              protocol: UDP
+              hostPort: 12055
+            - containerPort: 13055
+              name: port-13055
+              hostPort: 13055
+            - containerPort: 15055
+              name: port-15055
+              hostPort: 15055
+            - containerPort: 16055
+              name: port-16055
+              hostPort: 16055
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15055"
+            - name: SSV_API_PORT
+              value: "16055"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-55
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-55-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-55
-        persistentVolumeClaim:
-          claimName: ssv-node-55
-      - name: ssv-node-55-cm
-        configMap:
-          name: ssv-node-55-cm
+        - name: ssv-node-55
+          persistentVolumeClaim:
+            claimName: ssv-node-55
+        - name: ssv-node-55-cm
+          configMap:
+            name: ssv-node-55-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-56-deployment.yml b/.k8/hetzner-stage/ssv-node-56-deployment.yml
index 42c0c59b42..98f2c38df2 100644
--- a/.k8/hetzner-stage/ssv-node-56-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-56-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15056
       protocol: TCP
       targetPort: 15056
-      name: port-15056
+      name: metrics
     - port: 16056
       protocol: TCP
       targetPort: 16056
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-56
     spec:
       containers:
-      - name: ssv-node-56
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12056
-          name: port-12056
-          protocol: UDP
-          hostPort: 12056
-        - containerPort: 13056
-          name: port-13056
-          hostPort: 13056
-        - containerPort: 15056
-          name: port-15056
-          hostPort: 15056
-        - containerPort: 16056
-          name: port-16056
-          hostPort: 16056
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15056"
-        - name: SSV_API_PORT
-          value: "16056"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-56
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-56-cm
+        - name: ssv-node-56
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12056
+              name: port-12056
+              protocol: UDP
+              hostPort: 12056
+            - containerPort: 13056
+              name: port-13056
+              hostPort: 13056
+            - containerPort: 15056
+              name: port-15056
+              hostPort: 15056
+            - containerPort: 16056
+              name: port-16056
+              hostPort: 16056
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15056"
+            - name: SSV_API_PORT
+              value: "16056"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-56
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-56-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-56
-        persistentVolumeClaim:
-          claimName: ssv-node-56
-      - name: ssv-node-56-cm
-        configMap:
-          name: ssv-node-56-cm
+        - name: ssv-node-56
+          persistentVolumeClaim:
+            claimName: ssv-node-56
+        - name: ssv-node-56-cm
+          configMap:
+            name: ssv-node-56-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-57-deployment.yml b/.k8/hetzner-stage/ssv-node-57-deployment.yml
index d2d8945516..4d4b7e3826 100644
--- a/.k8/hetzner-stage/ssv-node-57-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-57-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15057
       protocol: TCP
       targetPort: 15057
-      name: port-15057
+      name: metrics
     - port: 16057
       protocol: TCP
       targetPort: 16057
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-57
     spec:
       containers:
-      - name: ssv-node-57
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12057
-          name: port-12057
-          protocol: UDP
-          hostPort: 12057
-        - containerPort: 13057
-          name: port-13057
-          hostPort: 13057
-        - containerPort: 15057
-          name: port-15057
-          hostPort: 15057
-        - containerPort: 16057
-          name: port-16057
-          hostPort: 16057
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15057"
-        - name: SSV_API_PORT
-          value: "16057"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-57
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-57-cm
+        - name: ssv-node-57
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12057
+              name: port-12057
+              protocol: UDP
+              hostPort: 12057
+            - containerPort: 13057
+              name: port-13057
+              hostPort: 13057
+            - containerPort: 15057
+              name: port-15057
+              hostPort: 15057
+            - containerPort: 16057
+              name: port-16057
+              hostPort: 16057
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15057"
+            - name: SSV_API_PORT
+              value: "16057"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-57
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-57-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-57
-        persistentVolumeClaim:
-          claimName: ssv-node-57
-      - name: ssv-node-57-cm
-        configMap:
-          name: ssv-node-57-cm
+        - name: ssv-node-57
+          persistentVolumeClaim:
+            claimName: ssv-node-57
+        - name: ssv-node-57-cm
+          configMap:
+            name: ssv-node-57-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml
index d3d20846e4..2984f489db 100644
--- a/.k8/hetzner-stage/ssv-node-58-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15058
       protocol: TCP
       targetPort: 15058
-      name: port-15058
+      name: metrics
     - port: 16058
       protocol: TCP
      targetPort: 16058
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-58
     spec:
       containers:
-      - name: ssv-node-58
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12058
-          name: port-12058
-          protocol: UDP
-          hostPort: 12058
-        - containerPort: 13058
-          name: port-13058
-          hostPort: 13058
-        - containerPort: 15058
-          name: port-15058
-          hostPort: 15058
-        - containerPort: 16058
-          name: port-16058
-          hostPort: 16058
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15058"
-        - name: SSV_API_PORT
-          value: "16058"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-58
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-58-cm
+        - name: ssv-node-58
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12058
+              name: port-12058
+              protocol: UDP
+              hostPort: 12058
+            - containerPort: 13058
+              name: port-13058
+              hostPort: 13058
+            - containerPort: 15058
+              name: port-15058
+              hostPort: 15058
+            - containerPort: 16058
+              name: port-16058
+              hostPort: 16058
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15058"
+            - name: SSV_API_PORT
+              value: "16058"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-58
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-58-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-58
-        persistentVolumeClaim:
-          claimName: ssv-node-58
-      - name: ssv-node-58-cm
-        configMap:
-          name: ssv-node-58-cm
+        - name: ssv-node-58
+          persistentVolumeClaim:
+            claimName: ssv-node-58
+        - name: ssv-node-58-cm
+          configMap:
+            name: ssv-node-58-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-59-deployment.yml b/.k8/hetzner-stage/ssv-node-59-deployment.yml
index 8cefa6ba7b..ababa32081 100644
--- a/.k8/hetzner-stage/ssv-node-59-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-59-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15059
       protocol: TCP
       targetPort: 15059
-      name: port-15059
+      name: metrics
     - port: 16059
       protocol: TCP
       targetPort: 16059
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-59
     spec:
       containers:
-      - name: ssv-node-59
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12059
-          name: port-12059
-          protocol: UDP
-          hostPort: 12059
-        - containerPort: 13059
-          name: port-13059
-          hostPort: 13059
-        - containerPort: 15059
-          name: port-15059
-          hostPort: 15059
-        - containerPort: 16059
-          name: port-16059
-          hostPort: 16059
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15059"
-        - name: SSV_API_PORT
-          value: "16059"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-59
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-59-cm
+        - name: ssv-node-59
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12059
+              name: port-12059
+              protocol: UDP
+              hostPort: 12059
+            - containerPort: 13059
+              name: port-13059
+              hostPort: 13059
+            - containerPort: 15059
+              name: port-15059
+              hostPort: 15059
+            - containerPort: 16059
+              name: port-16059
+              hostPort: 16059
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15059"
+            - name: SSV_API_PORT
+              value: "16059"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-59
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-59-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-59
-        persistentVolumeClaim:
-          claimName: ssv-node-59
-      - name: ssv-node-59-cm
-        configMap:
-          name: ssv-node-59-cm
+        - name: ssv-node-59
+          persistentVolumeClaim:
+            claimName: ssv-node-59
+        - name: ssv-node-59-cm
+          configMap:
+            name: ssv-node-59-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-6-deployment.yml b/.k8/hetzner-stage/ssv-node-6-deployment.yml
index 6eff03c297..c5bd225c4e 100644
--- a/.k8/hetzner-stage/ssv-node-6-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-6-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15006
       protocol: TCP
       targetPort: 15006
-      name: port-15006
+      name: metrics
     - port: 16006
       protocol: TCP
       targetPort: 16006
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-6
     spec:
       containers:
-      - name: ssv-node-6
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12006
-          name: port-12006
-          protocol: UDP
-          hostPort: 12006
-        - containerPort: 13006
-          name: port-13006
-          hostPort: 13006
-        - containerPort: 15006
-          name: port-15006
-          hostPort: 15006
-        - containerPort: 16006
-          name: port-16006
-          hostPort: 16006
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15006"
-        - name: SSV_API_PORT
-          value: "16006"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-6
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-6-cm
+        - name: ssv-node-6
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12006
+              name: port-12006
+              protocol: UDP
+              hostPort: 12006
+            - containerPort: 13006
+              name: port-13006
+              hostPort: 13006
+            - containerPort: 15006
+              name: port-15006
+              hostPort: 15006
+            - containerPort: 16006
+              name: port-16006
+              hostPort: 16006
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15006"
+            - name: SSV_API_PORT
+              value: "16006"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-6
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-6-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-6
-        persistentVolumeClaim:
-          claimName: ssv-node-6
-      - name: ssv-node-6-cm
-        configMap:
-          name: ssv-node-6-cm
+        - name: ssv-node-6
+          persistentVolumeClaim:
+            claimName: ssv-node-6
+        - name: ssv-node-6-cm
+          configMap:
+            name: ssv-node-6-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-60-deployment.yml b/.k8/hetzner-stage/ssv-node-60-deployment.yml
index ca0b3dc8cd..d2cc8d73f7 100644
--- a/.k8/hetzner-stage/ssv-node-60-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-60-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15060
       protocol: TCP
       targetPort: 15060
-      name: port-15060
+      name: metrics
     - port: 16060
       protocol: TCP
       targetPort: 16060
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-60
     spec:
       containers:
-      - name: ssv-node-60
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12060
-          name: port-12060
-          protocol: UDP
-          hostPort: 12060
-        - containerPort: 13060
-          name: port-13060
-          hostPort: 13060
-        - containerPort: 15060
-          name: port-15060
-          hostPort: 15060
-        - containerPort: 16060
-          name: port-16060
-          hostPort: 16060
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15060"
-        - name: SSV_API_PORT
-          value: "16060"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-60
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-60-cm
+        - name: ssv-node-60
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12060
+              name: port-12060
+              protocol: UDP
+              hostPort: 12060
+            - containerPort: 13060
+              name: port-13060
+              hostPort: 13060
+            - containerPort: 15060
+              name: port-15060
+              hostPort: 15060
+            - containerPort: 16060
+              name: port-16060
+              hostPort: 16060
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15060"
+            - name: SSV_API_PORT
+              value: "16060"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-60
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-60-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-60
-        persistentVolumeClaim:
-          claimName: ssv-node-60
-      - name: ssv-node-60-cm
-        configMap:
-          name: ssv-node-60-cm
+        - name: ssv-node-60
+          persistentVolumeClaim:
+            claimName: ssv-node-60
+        - name: ssv-node-60-cm
+          configMap:
+            name: ssv-node-60-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-61-deployment.yml b/.k8/hetzner-stage/ssv-node-61-deployment.yml
index 339c551727..0558536fce 100644
--- a/.k8/hetzner-stage/ssv-node-61-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-61-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15061
       protocol: TCP
       targetPort: 15061
-      name: port-15061
+      name: metrics
     - port: 16061
       protocol: TCP
       targetPort: 16061
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-61
     spec:
       containers:
-      - name: ssv-node-61
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12061
-          name: port-12061
-          protocol: UDP
-          hostPort: 12061
-        - containerPort: 13061
-          name: port-13061
-          hostPort: 13061
-        - containerPort: 15061
-          name: port-15061
-          hostPort: 15061
-        - containerPort: 16061
-          name: port-16061
-          hostPort: 16061
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15061"
-        - name: SSV_API_PORT
-          value: "16061"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-61
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-61-cm
+        - name: ssv-node-61
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12061
+              name: port-12061
+              protocol: UDP
+              hostPort: 12061
+            - containerPort: 13061
+              name: port-13061
+              hostPort: 13061
+            - containerPort: 15061
+              name: port-15061
+              hostPort: 15061
+            - containerPort: 16061
+              name: port-16061
+              hostPort: 16061
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15061"
+            - name: SSV_API_PORT
+              value: "16061"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-61
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-61-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-61
-        persistentVolumeClaim:
-          claimName: ssv-node-61
-      - name: ssv-node-61-cm
-        configMap:
-          name: ssv-node-61-cm
+        - name: ssv-node-61
+          persistentVolumeClaim:
+            claimName: ssv-node-61
+        - name: ssv-node-61-cm
+          configMap:
+            name: ssv-node-61-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-62-deployment.yml b/.k8/hetzner-stage/ssv-node-62-deployment.yml
index 531005618a..5652c92467 100644
--- a/.k8/hetzner-stage/ssv-node-62-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-62-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15062
       protocol: TCP
       targetPort: 15062
-      name: port-15062
+      name: metrics
     - port: 16062
       protocol: TCP
       targetPort: 16062
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-62
     spec:
       containers:
-      - name: ssv-node-62
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12062
-          name: port-12062
-          protocol: UDP
-          hostPort: 12062
-        - containerPort: 13062
-          name: port-13062
-          hostPort: 13062
-        - containerPort: 15062
-          name: port-15062
-          hostPort: 15062
-        - containerPort: 16062
-          name: port-16062
-          hostPort: 16062
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15062"
-        - name: SSV_API_PORT
-          value: "16062"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-62
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-62-cm
+        - name: ssv-node-62
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12062
+              name: port-12062
+              protocol: UDP
+              hostPort: 12062
+            - containerPort: 13062
+              name: port-13062
+              hostPort: 13062
+            - containerPort: 15062
+              name: port-15062
+              hostPort: 15062
+            - containerPort: 16062
+              name: port-16062
+              hostPort: 16062
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15062"
+            - name: SSV_API_PORT
+              value: "16062"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-62
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-62-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-62
-        persistentVolumeClaim:
-          claimName: ssv-node-62
-      - name: ssv-node-62-cm
-        configMap:
-          name: ssv-node-62-cm
+        - name: ssv-node-62
+          persistentVolumeClaim:
+            claimName: ssv-node-62
+        - name: ssv-node-62-cm
+          configMap:
+            name: ssv-node-62-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-63-deployment.yml b/.k8/hetzner-stage/ssv-node-63-deployment.yml
index 39e261a3bf..2362357907 100644
--- a/.k8/hetzner-stage/ssv-node-63-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-63-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15063
       protocol: TCP
       targetPort: 15063
-      name: port-15063
+      name: metrics
     - port: 16063
       protocol: TCP
       targetPort: 16063
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-63
     spec:
       containers:
-      - name: ssv-node-63
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12063
-          name: port-12063
-          protocol: UDP
-          hostPort: 12063
-        - containerPort: 13063
-          name: port-13063
-          hostPort: 13063
-        - containerPort: 15063
-          name: port-15063
-          hostPort: 15063
-        - containerPort: 16063
-          name: port-16063
-          hostPort: 16063
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15063"
-        - name: SSV_API_PORT
-          value: "16063"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-63
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-63-cm
+        - name: ssv-node-63
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12063
+              name: port-12063
+              protocol: UDP
+              hostPort: 12063
+            - containerPort: 13063
+              name: port-13063
+              hostPort: 13063
+            - containerPort: 15063
+              name: port-15063
+              hostPort: 15063
+            - containerPort: 16063
+              name: port-16063
+              hostPort: 16063
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15063"
+            - name: SSV_API_PORT
+              value: "16063"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-63
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-63-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-63
-        persistentVolumeClaim:
-          claimName: ssv-node-63
-      - name: ssv-node-63-cm
-        configMap:
-          name: ssv-node-63-cm
+        - name: ssv-node-63
+          persistentVolumeClaim:
+            claimName: ssv-node-63
+        - name: ssv-node-63-cm
+          configMap:
+            name: ssv-node-63-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-64-deployment.yml b/.k8/hetzner-stage/ssv-node-64-deployment.yml
index 709fc026fa..e44d042cdd 100644
--- a/.k8/hetzner-stage/ssv-node-64-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-64-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15064
       protocol: TCP
       targetPort: 15064
-      name: port-15064
+      name: metrics
     - port: 16064
       protocol: TCP
       targetPort: 16064
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-64
     spec:
       containers:
-      - name: ssv-node-64
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12064
-          name: port-12064
-          protocol: UDP
-          hostPort: 12064
-        - containerPort: 13064
-          name: port-13064
-          hostPort: 13064
-        - containerPort: 15064
-          name: port-15064
-          hostPort: 15064
-        - containerPort: 16064
-          name: port-16064
-          hostPort: 16064
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15064"
-        - name: SSV_API_PORT
-          value: "16064"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-64
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-64-cm
+        - name: ssv-node-64
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12064
+              name: port-12064
+              protocol: UDP
+              hostPort: 12064
+            - containerPort: 13064
+              name: port-13064
+              hostPort: 13064
+            - containerPort: 15064
+              name: port-15064
+              hostPort: 15064
+            - containerPort: 16064
+              name: port-16064
+              hostPort: 16064
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15064"
+            - name: SSV_API_PORT
+              value: "16064"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-64
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-64-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-64
-        persistentVolumeClaim:
-          claimName: ssv-node-64
-      - name: ssv-node-64-cm
-        configMap:
-          name: ssv-node-64-cm
+        - name: ssv-node-64
+          persistentVolumeClaim:
+            claimName: ssv-node-64
+        - name: ssv-node-64-cm
+          configMap:
+            name: ssv-node-64-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-65-deployment.yml b/.k8/hetzner-stage/ssv-node-65-deployment.yml
index 7872f5efef..c656cce830 100644
--- a/.k8/hetzner-stage/ssv-node-65-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-65-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15065
       protocol: TCP
       targetPort: 15065
-      name: port-15065
+      name: metrics
     - port: 16065
       protocol: TCP
       targetPort: 16065
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-65
     spec:
       containers:
-      - name: ssv-node-65
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12065
-          name: port-12065
-          protocol: UDP
-          hostPort: 12065
-        - containerPort: 13065
-          name: port-13065
-          hostPort: 13065
-        - containerPort: 15065
-          name: port-15065
-          hostPort: 15065
-        - containerPort: 16065
-          name: port-16065
-          hostPort: 16065
-        env:
-        - name: SHARE_CONFIG
-          value: "./data/share.yaml"
-        - name: CONFIG_PATH
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: config_path
-        - name: ABI_VERSION
-          valueFrom:
-            secretKeyRef:
-              name: config-secrets
-              key: abi_version
-              optional: true
-        - name: LOG_LEVEL
-          value: "debug"
-        - name: DEBUG_SERVICES
-          value: "ssv/*."
-        - name: DISCOVERY_TYPE_KEY
-          value: "discv5"
-        - name: CONSENSUS_TYPE
-          value: "validation"
-        - name: HOST_DNS
-          value: ""
-        - name: HOST_ADDRESS
-          value: ""
-        - name: DB_PATH
-          value: "./data/db-holesky-stage"
-        - name: NETWORK
-          value: "holesky-stage"
-        - name: DB_REPORTING
-          value: "false"
-        - name: METRICS_API_PORT
-          value: "15065"
-        - name: SSV_API_PORT
-          value: "16065"
-        - name: ENABLE_PROFILE
-          value: "true"
-        - name: DISCOVERY_TRACE
-          value: 'false'
-        - name: PUBSUB_TRACE
-          value: 'false'
-        - name: BUILDER_PROPOSALS
-          value: "true"
-        volumeMounts:
-        - mountPath: /data
-          name: ssv-node-65
-        - mountPath: /data/share.yaml
-          subPath: share.yaml
-          name: ssv-node-65-cm
+        - name: ssv-node-65
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          #image: mosheblox/ssv-preview:stage
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_NODES_CPU_LIMIT
+              memory: REPLACE_NODES_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12065
+              name: port-12065
+              protocol: UDP
+              hostPort: 12065
+            - containerPort: 13065
+              name: port-13065
+              hostPort: 13065
+            - containerPort: 15065
+              name: port-15065
+              hostPort: 15065
+            - containerPort: 16065
+              name: port-16065
+              hostPort: 16065
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv/*."
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: CONSENSUS_TYPE
+              value: "validation"
+            - name: HOST_DNS
+              value: ""
+            - name: HOST_ADDRESS
+              value: ""
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15065"
+            - name: SSV_API_PORT
+              value: "16065"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: DISCOVERY_TRACE
+              value: 'false'
+            - name: PUBSUB_TRACE
+              value: 'false'
+            - name: BUILDER_PROPOSALS
+              value: "true"
+          volumeMounts:
+            - mountPath: /data
+              name: ssv-node-65
+            - mountPath: /data/share.yaml
+              subPath: share.yaml
+              name: ssv-node-65-cm
       imagePullSecrets:
         - name: ecr-repo
       volumes:
-      - name: ssv-node-65
-        persistentVolumeClaim:
-          claimName: ssv-node-65
-      - name: ssv-node-65-cm
-        configMap:
-          name: ssv-node-65-cm
+        - name: ssv-node-65
+          persistentVolumeClaim:
+            claimName: ssv-node-65
+        - name: ssv-node-65-cm
+          configMap:
+            name: ssv-node-65-cm
       hostNetwork: true
diff --git a/.k8/hetzner-stage/ssv-node-66-deployment.yml b/.k8/hetzner-stage/ssv-node-66-deployment.yml
index 8cf3d90cfe..0791fd45c4 100644
--- a/.k8/hetzner-stage/ssv-node-66-deployment.yml
+++ b/.k8/hetzner-stage/ssv-node-66-deployment.yml
@@ -20,7 +20,7 @@ spec:
     - port: 15066
       protocol: TCP
       targetPort: 15066
-      name: port-15066
+      name: metrics
     - port: 16066
       protocol: TCP
       targetPort: 16066
@@ -48,86 +48,86 @@ spec:
         app: ssv-node-66
     spec:
       containers:
-      - name: ssv-node-66
-        image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
-        #image: mosheblox/ssv-preview:stage
-        imagePullPolicy: Always
-        resources:
-          limits:
-            cpu: REPLACE_NODES_CPU_LIMIT
-            memory: REPLACE_NODES_MEM_LIMIT
-        command: ["make", "start-node"]
-        ports:
-        - containerPort: 12066
-          name: port-12066
-          protocol: UDP
-          hostPort: 12066
-        - containerPort: 13066
-          name: port-13066
-          hostPort: 13066
-        - containerPort: 15066
-          name: port-15066
-          hostPort: 15066
-        - containerPort: 16066
-          name: port-16066
-          hostPort: 16066
-        env:
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15066" - - name: SSV_API_PORT - value: "16066" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-66 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-66-cm + - name: ssv-node-66 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12066 + name: port-12066 + protocol: UDP + hostPort: 12066 + - containerPort: 13066 + name: port-13066 + hostPort: 13066 + - containerPort: 15066 + name: port-15066 + hostPort: 15066 + - containerPort: 16066 + name: port-16066 + hostPort: 16066 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15066" + - name: SSV_API_PORT + value: "16066" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-66 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-66-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-66 - persistentVolumeClaim: - claimName: ssv-node-66 - - name: ssv-node-66-cm - configMap: - name: ssv-node-66-cm + - name: ssv-node-66 + persistentVolumeClaim: + claimName: ssv-node-66 + - name: ssv-node-66-cm + configMap: + name: ssv-node-66-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-67-deployment.yml b/.k8/hetzner-stage/ssv-node-67-deployment.yml index b9620a8b44..c57554c8a9 100644 --- a/.k8/hetzner-stage/ssv-node-67-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-67-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15067 protocol: TCP targetPort: 15067 - name: port-15067 + name: metrics - port: 16067 protocol: TCP targetPort: 16067 @@ -48,86 +48,86 @@ spec: app: ssv-node-67 spec: containers: - - name: ssv-node-67 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12067 - name: port-12067 - protocol: UDP - hostPort: 12067 - - containerPort: 13067 - name: port-13067 - hostPort: 13067 - - containerPort: 15067 - name: port-15067 - hostPort: 15067 - - containerPort: 16067 - name: port-16067 - hostPort: 16067 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15067" - - name: SSV_API_PORT - value: "16067" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-67 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-67-cm + - name: ssv-node-67 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12067 + name: port-12067 + protocol: UDP + hostPort: 12067 + - containerPort: 13067 + name: port-13067 + hostPort: 13067 + - containerPort: 15067 + name: port-15067 + hostPort: 15067 + - containerPort: 16067 + name: port-16067 + hostPort: 16067 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15067" + - name: SSV_API_PORT + value: "16067" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-67 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-67-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-67 - persistentVolumeClaim: - claimName: ssv-node-67 - - name: ssv-node-67-cm - configMap: - name: ssv-node-67-cm + - name: ssv-node-67 + persistentVolumeClaim: + claimName: ssv-node-67 + - name: ssv-node-67-cm + configMap: + name: ssv-node-67-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-68-deployment.yml b/.k8/hetzner-stage/ssv-node-68-deployment.yml index b7252d580e..b7ff0d0801 100644 --- a/.k8/hetzner-stage/ssv-node-68-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-68-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15068 protocol: TCP targetPort: 15068 - name: port-15068 + name: metrics - port: 16068 protocol: TCP targetPort: 16068 @@ -48,86 +48,86 @@ spec: app: ssv-node-68 spec: containers: - - name: ssv-node-68 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12068 - name: port-12068 - protocol: UDP - hostPort: 12068 - - containerPort: 13068 - name: port-13068 - hostPort: 13068 - - containerPort: 15068 - name: port-15068 - hostPort: 15068 - - containerPort: 16068 - name: port-16068 - hostPort: 16068 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15068" - - name: SSV_API_PORT - value: "16068" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-68 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-68-cm + - name: ssv-node-68 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12068 + name: port-12068 + protocol: UDP + hostPort: 12068 + - containerPort: 13068 + name: port-13068 + hostPort: 13068 + - containerPort: 15068 + name: port-15068 + hostPort: 15068 + - containerPort: 16068 + name: port-16068 + hostPort: 16068 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15068" + - name: SSV_API_PORT + value: "16068" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-68 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-68-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-68 - persistentVolumeClaim: - claimName: ssv-node-68 - - name: ssv-node-68-cm - configMap: - name: ssv-node-68-cm + - name: ssv-node-68 + persistentVolumeClaim: + claimName: ssv-node-68 + - name: ssv-node-68-cm + configMap: + name: ssv-node-68-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-69-deployment.yml b/.k8/hetzner-stage/ssv-node-69-deployment.yml index 6372ddf492..aceb76acdc 100644 --- a/.k8/hetzner-stage/ssv-node-69-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-69-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15069 protocol: TCP targetPort: 15069 - name: port-15069 + name: metrics - port: 16069 protocol: TCP targetPort: 16069 @@ -48,86 +48,86 @@ spec: app: ssv-node-69 spec: containers: - - name: ssv-node-69 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12069 - name: port-12069 - protocol: UDP - hostPort: 12069 - - containerPort: 13069 - name: port-13069 - hostPort: 13069 - - containerPort: 15069 - name: port-15069 - hostPort: 15069 - - containerPort: 16069 - name: port-16069 - hostPort: 16069 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15069" - - name: SSV_API_PORT - value: "16069" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-69 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-69-cm + - name: ssv-node-69 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12069 + name: port-12069 + protocol: UDP + hostPort: 12069 + - containerPort: 13069 + name: port-13069 + hostPort: 13069 + - containerPort: 15069 + name: port-15069 + hostPort: 15069 + - containerPort: 16069 + name: port-16069 + hostPort: 16069 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15069" + - name: SSV_API_PORT + value: "16069" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-69 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-69-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-69 - persistentVolumeClaim: - claimName: ssv-node-69 - - name: ssv-node-69-cm - configMap: - name: ssv-node-69-cm + - name: ssv-node-69 + persistentVolumeClaim: + claimName: ssv-node-69 + - name: ssv-node-69-cm + configMap: + name: ssv-node-69-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-7-deployment.yml b/.k8/hetzner-stage/ssv-node-7-deployment.yml index 49101753c1..fce2676451 100644 --- a/.k8/hetzner-stage/ssv-node-7-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-7-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15007 protocol: TCP targetPort: 15007 - name: port-15007 + name: metrics - port: 16007 protocol: TCP targetPort: 16007 @@ -48,86 +48,86 @@ spec: app: ssv-node-7 spec: containers: - - name: ssv-node-7 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12007 - name: port-12007 - protocol: UDP - hostPort: 12007 - - containerPort: 13007 - name: port-13007 - hostPort: 13007 - - containerPort: 15007 - name: port-15007 - hostPort: 15007 - - containerPort: 16007 - name: port-16007 - hostPort: 16007 - env: - - name: 
SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15007" - - name: SSV_API_PORT - value: "16007" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-7 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-7-cm + - name: ssv-node-7 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12007 + name: port-12007 + protocol: UDP + hostPort: 12007 + - containerPort: 13007 + name: port-13007 + hostPort: 13007 + - containerPort: 15007 + name: port-15007 + hostPort: 15007 + - containerPort: 16007 + name: port-16007 + hostPort: 16007 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15007" + - name: SSV_API_PORT + value: "16007" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-7 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-7-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-7 - persistentVolumeClaim: - claimName: ssv-node-7 - - name: ssv-node-7-cm - configMap: - name: ssv-node-7-cm + - name: ssv-node-7 + persistentVolumeClaim: + claimName: ssv-node-7 + - name: ssv-node-7-cm + configMap: + name: ssv-node-7-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-70-deployment.yml b/.k8/hetzner-stage/ssv-node-70-deployment.yml index d9cb6b3604..759d1dfa32 100644 --- a/.k8/hetzner-stage/ssv-node-70-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-70-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15070 protocol: TCP targetPort: 15070 - name: port-15070 + name: metrics - port: 16070 protocol: TCP targetPort: 16070 @@ -48,86 +48,86 @@ spec: app: ssv-node-70 spec: containers: - - name: ssv-node-70 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12070 - name: port-12070 - protocol: UDP - hostPort: 12070 - - containerPort: 13070 - name: port-13070 - hostPort: 13070 - - containerPort: 15070 - name: port-15070 - hostPort: 15070 - - containerPort: 16070 - name: port-16070 - hostPort: 16070 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15070" - - name: SSV_API_PORT - value: "16070" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-70 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-70-cm + - name: ssv-node-70 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12070 + name: port-12070 + protocol: UDP + hostPort: 12070 + - containerPort: 13070 + name: port-13070 + hostPort: 13070 + - containerPort: 15070 + name: port-15070 + hostPort: 15070 + - containerPort: 16070 + name: port-16070 + hostPort: 16070 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15070" + - name: SSV_API_PORT + value: "16070" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-70 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-70-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-70 - persistentVolumeClaim: - claimName: ssv-node-70 - - name: ssv-node-70-cm - configMap: - name: ssv-node-70-cm + - name: ssv-node-70 + persistentVolumeClaim: + claimName: ssv-node-70 + - name: ssv-node-70-cm + configMap: + name: ssv-node-70-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-71-deployment.yml b/.k8/hetzner-stage/ssv-node-71-deployment.yml index cde1e7cd7e..6e69a38a3a 100644 --- a/.k8/hetzner-stage/ssv-node-71-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-71-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15071 protocol: TCP targetPort: 15071 - name: port-15071 + name: metrics - port: 16071 protocol: TCP targetPort: 16071 @@ -48,86 +48,86 @@ spec: app: ssv-node-71 spec: containers: - - name: ssv-node-71 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12071 - name: port-12071 - protocol: UDP - hostPort: 12071 - - containerPort: 13071 - name: port-13071 - hostPort: 13071 - - containerPort: 15071 - name: port-15071 - hostPort: 15071 - - containerPort: 16071 - name: port-16071 - hostPort: 16071 - env: - - 
name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15071" - - name: SSV_API_PORT - value: "16071" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-71 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-71-cm + - name: ssv-node-71 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12071 + name: port-12071 + protocol: UDP + hostPort: 12071 + - containerPort: 13071 + name: port-13071 + hostPort: 13071 + - containerPort: 15071 + name: port-15071 + hostPort: 15071 + - containerPort: 16071 + name: port-16071 + hostPort: 16071 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15071" + - name: SSV_API_PORT + value: "16071" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-71 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-71-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-71 - persistentVolumeClaim: - claimName: ssv-node-71 - - name: ssv-node-71-cm - configMap: - name: ssv-node-71-cm + - name: ssv-node-71 + persistentVolumeClaim: + claimName: ssv-node-71 + - name: ssv-node-71-cm + configMap: + name: ssv-node-71-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-72-deployment.yml b/.k8/hetzner-stage/ssv-node-72-deployment.yml index 11b639df29..f0a54d9fba 100644 --- a/.k8/hetzner-stage/ssv-node-72-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-72-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15072 protocol: TCP targetPort: 15072 - name: port-15072 + name: metrics - port: 16072 protocol: TCP targetPort: 16072 @@ -48,86 +48,86 @@ spec: app: ssv-node-72 spec: containers: - - name: ssv-node-72 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12072 - name: port-12072 - protocol: UDP - hostPort: 12072 - - containerPort: 13072 - name: port-13072 - hostPort: 13072 - - containerPort: 15072 - name: port-15072 - hostPort: 15072 - - containerPort: 16072 - name: port-16072 - hostPort: 16072 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15072" - - name: SSV_API_PORT - value: "16072" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-72 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-72-cm + - name: ssv-node-72 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12072 + name: port-12072 + protocol: UDP + hostPort: 12072 + - containerPort: 13072 + name: port-13072 + hostPort: 13072 + - containerPort: 15072 + name: port-15072 + hostPort: 15072 + - containerPort: 16072 + name: port-16072 + hostPort: 16072 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15072" + - name: SSV_API_PORT + value: "16072" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-72 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-72-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-72 - persistentVolumeClaim: - claimName: ssv-node-72 - - name: ssv-node-72-cm - configMap: - name: ssv-node-72-cm + - name: ssv-node-72 + persistentVolumeClaim: + claimName: ssv-node-72 + - name: ssv-node-72-cm + configMap: + name: ssv-node-72-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-8-deployment.yml b/.k8/hetzner-stage/ssv-node-8-deployment.yml index a08bd81e24..f2a09e9c8a 100644 --- a/.k8/hetzner-stage/ssv-node-8-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-8-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15008 protocol: TCP targetPort: 15008 - name: port-15008 + name: metrics - port: 16008 protocol: TCP targetPort: 16008 @@ -48,86 +48,86 @@ spec: app: ssv-node-8 spec: containers: - - name: ssv-node-8 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12008 - name: port-12008 - protocol: UDP - hostPort: 12008 - - containerPort: 13008 - name: port-13008 - hostPort: 13008 - - containerPort: 15008 - name: port-15008 - hostPort: 15008 - - containerPort: 16008 - name: port-16008 - hostPort: 16008 - env: - - name: 
SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15008" - - name: SSV_API_PORT - value: "16008" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-8 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-8-cm + - name: ssv-node-8 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12008 + name: port-12008 + protocol: UDP + hostPort: 12008 + - containerPort: 13008 + name: port-13008 + hostPort: 13008 + - containerPort: 15008 + name: port-15008 + hostPort: 15008 + - containerPort: 16008 + name: port-16008 + hostPort: 16008 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15008" + - name: SSV_API_PORT + value: "16008" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-8 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-8-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-8 - persistentVolumeClaim: - claimName: ssv-node-8 - - name: ssv-node-8-cm - configMap: - name: ssv-node-8-cm + - name: ssv-node-8 + persistentVolumeClaim: + claimName: ssv-node-8 + - name: ssv-node-8-cm + configMap: + name: ssv-node-8-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-9-deployment.yml b/.k8/hetzner-stage/ssv-node-9-deployment.yml index 1dc1e6c2a3..8f2626f59c 100644 --- a/.k8/hetzner-stage/ssv-node-9-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-9-deployment.yml @@ -20,7 +20,7 @@ spec: - port: 15009 protocol: TCP targetPort: 15009 - name: port-15009 + name: metrics - port: 16009 protocol: TCP targetPort: 16009 @@ -48,86 +48,86 @@ spec: app: ssv-node-9 spec: containers: - - name: ssv-node-9 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - #image: mosheblox/ssv-preview:stage - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12009 - name: port-12009 - protocol: UDP - hostPort: 12009 - - containerPort: 13009 - name: port-13009 - hostPort: 13009 - - containerPort: 15009 - name: port-15009 - hostPort: 15009 - - containerPort: 16009 - name: port-16009 - hostPort: 16009 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-holesky-stage" - - name: NETWORK - value: "holesky-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15009" - - name: SSV_API_PORT - value: "16009" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-9 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-node-9-cm + - name: ssv-node-9 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12009 + name: port-12009 + protocol: UDP + hostPort: 12009 + - containerPort: 13009 + name: port-13009 + hostPort: 13009 + - containerPort: 15009 + name: port-15009 + hostPort: 15009 + - containerPort: 16009 + name: port-16009 + hostPort: 16009 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15009" + - name: SSV_API_PORT + value: "16009" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-9 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-9-cm imagePullSecrets: - name: ecr-repo volumes: - - name: ssv-node-9 - persistentVolumeClaim: - claimName: ssv-node-9 - - name: ssv-node-9-cm - configMap: - name: ssv-node-9-cm + - name: ssv-node-9 + persistentVolumeClaim: + claimName: ssv-node-9 + - name: ssv-node-9-cm + configMap: + name: ssv-node-9-cm hostNetwork: true From 7dcbc617621b6f34c6ad499acd6a432cc39557b3 Mon Sep 17 00:00:00 2001 From: Taiga Date: Mon, 6 Nov 2023 14:33:08 +0400 Subject: [PATCH 31/54] Add label to services --- .k8/hetzner-stage/ssv-node-1-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-10-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-11-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-12-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-13-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-14-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-15-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-16-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-17-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-18-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-19-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-2-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-20-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-21-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-22-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-23-deployment.yml | 1 + 
.k8/hetzner-stage/ssv-node-24-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-25-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-26-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-27-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-28-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-29-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-3-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-30-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-31-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-32-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-33-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-34-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-35-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-36-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-37-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-38-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-39-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-4-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-40-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-41-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-42-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-43-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-44-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-45-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-46-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-47-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-48-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-49-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-5-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-50-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-51-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-52-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-53-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-54-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-55-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-56-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-57-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-58-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-59-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-6-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-60-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-61-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-62-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-63-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-64-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-65-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-66-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-67-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-68-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-69-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-7-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-70-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-71-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-72-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-8-deployment.yml | 1 + .k8/hetzner-stage/ssv-node-9-deployment.yml | 1 + 72 files changed, 72 insertions(+) diff --git a/.k8/hetzner-stage/ssv-node-1-deployment.yml b/.k8/hetzner-stage/ssv-node-1-deployment.yml index 3287e66997..82717644ef 100644 --- a/.k8/hetzner-stage/ssv-node-1-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-1-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-1 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-10-deployment.yml b/.k8/hetzner-stage/ssv-node-10-deployment.yml index 194eb85750..38a9d42ef4 100644 --- a/.k8/hetzner-stage/ssv-node-10-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-10-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: 
REPLACE_NAMESPACE labels: app: ssv-node-10 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-11-deployment.yml b/.k8/hetzner-stage/ssv-node-11-deployment.yml index 2fcd34b6bc..fdba8e8e06 100644 --- a/.k8/hetzner-stage/ssv-node-11-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-11-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-11 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-12-deployment.yml b/.k8/hetzner-stage/ssv-node-12-deployment.yml index 2f29d4be43..39c53376d5 100644 --- a/.k8/hetzner-stage/ssv-node-12-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-12-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-12 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-13-deployment.yml b/.k8/hetzner-stage/ssv-node-13-deployment.yml index a6d64d39b0..45e8cd4c4f 100644 --- a/.k8/hetzner-stage/ssv-node-13-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-13-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-13 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-14-deployment.yml b/.k8/hetzner-stage/ssv-node-14-deployment.yml index 113d4abc7a..ba020c030a 100644 --- a/.k8/hetzner-stage/ssv-node-14-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-14-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-14 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-15-deployment.yml b/.k8/hetzner-stage/ssv-node-15-deployment.yml index 1e02ab3230..95ccb82b6d 100644 --- a/.k8/hetzner-stage/ssv-node-15-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-15-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-15 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-16-deployment.yml b/.k8/hetzner-stage/ssv-node-16-deployment.yml index d4d8a3802a..b7a2c083b2 100644 --- a/.k8/hetzner-stage/ssv-node-16-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-16-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-16 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-17-deployment.yml b/.k8/hetzner-stage/ssv-node-17-deployment.yml index f27cc45f04..476514b747 100644 --- a/.k8/hetzner-stage/ssv-node-17-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-17-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-17 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-18-deployment.yml b/.k8/hetzner-stage/ssv-node-18-deployment.yml index 3df713b625..c50e3a869a 100644 --- a/.k8/hetzner-stage/ssv-node-18-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-18-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-18 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-19-deployment.yml b/.k8/hetzner-stage/ssv-node-19-deployment.yml index 878654b288..a5e0fff55a 100644 --- a/.k8/hetzner-stage/ssv-node-19-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-19-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-19 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git 
a/.k8/hetzner-stage/ssv-node-2-deployment.yml b/.k8/hetzner-stage/ssv-node-2-deployment.yml index 1d6ff55534..38158a10d5 100644 --- a/.k8/hetzner-stage/ssv-node-2-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-2-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-2 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-20-deployment.yml b/.k8/hetzner-stage/ssv-node-20-deployment.yml index b3e800c134..2c22aa5d10 100644 --- a/.k8/hetzner-stage/ssv-node-20-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-20-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-20 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-21-deployment.yml b/.k8/hetzner-stage/ssv-node-21-deployment.yml index 7983c59b02..cebae4fbe7 100644 --- a/.k8/hetzner-stage/ssv-node-21-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-21-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-21 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-22-deployment.yml b/.k8/hetzner-stage/ssv-node-22-deployment.yml index 914c166ad5..425703ca22 100644 --- a/.k8/hetzner-stage/ssv-node-22-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-22-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-22 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-23-deployment.yml b/.k8/hetzner-stage/ssv-node-23-deployment.yml index f890622b3e..203b439712 100644 --- a/.k8/hetzner-stage/ssv-node-23-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-23-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-23 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-24-deployment.yml b/.k8/hetzner-stage/ssv-node-24-deployment.yml index 30ca48aaad..5a8d052145 100644 --- a/.k8/hetzner-stage/ssv-node-24-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-24-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-24 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-25-deployment.yml b/.k8/hetzner-stage/ssv-node-25-deployment.yml index 21c2b30e36..f09b90cd66 100644 --- a/.k8/hetzner-stage/ssv-node-25-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-25-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-25 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-26-deployment.yml b/.k8/hetzner-stage/ssv-node-26-deployment.yml index 75691eee89..0bfde7769a 100644 --- a/.k8/hetzner-stage/ssv-node-26-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-26-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-26 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-27-deployment.yml b/.k8/hetzner-stage/ssv-node-27-deployment.yml index 9c0f90e5f3..1dc139c85a 100644 --- a/.k8/hetzner-stage/ssv-node-27-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-27-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-27 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-28-deployment.yml b/.k8/hetzner-stage/ssv-node-28-deployment.yml index 3b61f9e7e0..ac89f6e95d 100644 --- 
a/.k8/hetzner-stage/ssv-node-28-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-28-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-28 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-29-deployment.yml b/.k8/hetzner-stage/ssv-node-29-deployment.yml index 2b9a312a16..1193a78621 100644 --- a/.k8/hetzner-stage/ssv-node-29-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-29-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-29 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-3-deployment.yml b/.k8/hetzner-stage/ssv-node-3-deployment.yml index a001e63afd..18fbc20c03 100644 --- a/.k8/hetzner-stage/ssv-node-3-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-3-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-3 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-30-deployment.yml b/.k8/hetzner-stage/ssv-node-30-deployment.yml index 1b5b7bd0db..4a5c58ab27 100644 --- a/.k8/hetzner-stage/ssv-node-30-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-30-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-30 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-31-deployment.yml b/.k8/hetzner-stage/ssv-node-31-deployment.yml index a0b439ec11..00fd954c05 100644 --- a/.k8/hetzner-stage/ssv-node-31-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-31-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-31 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-32-deployment.yml b/.k8/hetzner-stage/ssv-node-32-deployment.yml index f7e7bd5d21..36f1090ec3 100644 --- a/.k8/hetzner-stage/ssv-node-32-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-32-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-32 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-33-deployment.yml b/.k8/hetzner-stage/ssv-node-33-deployment.yml index 022fdea98e..47a0b113f6 100644 --- a/.k8/hetzner-stage/ssv-node-33-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-33-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-33 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-34-deployment.yml b/.k8/hetzner-stage/ssv-node-34-deployment.yml index ea73c678b5..387a834820 100644 --- a/.k8/hetzner-stage/ssv-node-34-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-34-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-34 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-35-deployment.yml b/.k8/hetzner-stage/ssv-node-35-deployment.yml index 934c13dad8..043ddafb9b 100644 --- a/.k8/hetzner-stage/ssv-node-35-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-35-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-35 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-36-deployment.yml b/.k8/hetzner-stage/ssv-node-36-deployment.yml index a0a16967a0..b2c2e0a89d 100644 --- a/.k8/hetzner-stage/ssv-node-36-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-36-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: 
REPLACE_NAMESPACE labels: app: ssv-node-36 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-37-deployment.yml b/.k8/hetzner-stage/ssv-node-37-deployment.yml index 46ae2171ca..25b640e23e 100644 --- a/.k8/hetzner-stage/ssv-node-37-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-37-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-37 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-38-deployment.yml b/.k8/hetzner-stage/ssv-node-38-deployment.yml index 4d174185bc..7f4858f2de 100644 --- a/.k8/hetzner-stage/ssv-node-38-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-38-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-38 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-39-deployment.yml b/.k8/hetzner-stage/ssv-node-39-deployment.yml index a32b658d36..322bca8ede 100644 --- a/.k8/hetzner-stage/ssv-node-39-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-39-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-39 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-4-deployment.yml b/.k8/hetzner-stage/ssv-node-4-deployment.yml index 9e86515560..0f713f256e 100644 --- a/.k8/hetzner-stage/ssv-node-4-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-4-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-4 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-40-deployment.yml b/.k8/hetzner-stage/ssv-node-40-deployment.yml index 2510a5e0f4..baa40ea8b1 100644 --- a/.k8/hetzner-stage/ssv-node-40-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-40-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-40 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-41-deployment.yml b/.k8/hetzner-stage/ssv-node-41-deployment.yml index b7af959dce..a066f20316 100644 --- a/.k8/hetzner-stage/ssv-node-41-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-41-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-41 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-42-deployment.yml b/.k8/hetzner-stage/ssv-node-42-deployment.yml index 763e462ebd..9a90886eda 100644 --- a/.k8/hetzner-stage/ssv-node-42-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-42-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-42 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-43-deployment.yml b/.k8/hetzner-stage/ssv-node-43-deployment.yml index 78ef2ed8a9..c6c08613de 100644 --- a/.k8/hetzner-stage/ssv-node-43-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-43-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-43 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-44-deployment.yml b/.k8/hetzner-stage/ssv-node-44-deployment.yml index 2e7a8fb096..b533b6bcfc 100644 --- a/.k8/hetzner-stage/ssv-node-44-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-44-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-44 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git 
a/.k8/hetzner-stage/ssv-node-45-deployment.yml b/.k8/hetzner-stage/ssv-node-45-deployment.yml index 89a3dcf3d2..dd4e94430d 100644 --- a/.k8/hetzner-stage/ssv-node-45-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-45-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-45 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-46-deployment.yml b/.k8/hetzner-stage/ssv-node-46-deployment.yml index 57fb291871..8ce5fc8625 100644 --- a/.k8/hetzner-stage/ssv-node-46-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-46-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-46 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-47-deployment.yml b/.k8/hetzner-stage/ssv-node-47-deployment.yml index 78cdb5aa62..20f13789b7 100644 --- a/.k8/hetzner-stage/ssv-node-47-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-47-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-47 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-48-deployment.yml b/.k8/hetzner-stage/ssv-node-48-deployment.yml index 72a53ac460..e750831e12 100644 --- a/.k8/hetzner-stage/ssv-node-48-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-48-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-48 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-49-deployment.yml b/.k8/hetzner-stage/ssv-node-49-deployment.yml index a101e8004f..350802f021 100644 --- a/.k8/hetzner-stage/ssv-node-49-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-49-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-49 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-5-deployment.yml b/.k8/hetzner-stage/ssv-node-5-deployment.yml index 0079f9643c..c94a30acf9 100644 --- a/.k8/hetzner-stage/ssv-node-5-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-5-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-5 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-50-deployment.yml b/.k8/hetzner-stage/ssv-node-50-deployment.yml index afceccc175..4e2bbebce8 100644 --- a/.k8/hetzner-stage/ssv-node-50-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-50-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-50 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-51-deployment.yml b/.k8/hetzner-stage/ssv-node-51-deployment.yml index 3bb3f2f98d..7e46ea6560 100644 --- a/.k8/hetzner-stage/ssv-node-51-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-51-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-51 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-52-deployment.yml b/.k8/hetzner-stage/ssv-node-52-deployment.yml index e926abf433..c676691f8c 100644 --- a/.k8/hetzner-stage/ssv-node-52-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-52-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-52 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-53-deployment.yml b/.k8/hetzner-stage/ssv-node-53-deployment.yml index 427b1229e0..678301bc85 100644 --- 
a/.k8/hetzner-stage/ssv-node-53-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-53-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-53 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-54-deployment.yml b/.k8/hetzner-stage/ssv-node-54-deployment.yml index 7023eb722b..c91ec66e42 100644 --- a/.k8/hetzner-stage/ssv-node-54-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-54-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-54 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-55-deployment.yml b/.k8/hetzner-stage/ssv-node-55-deployment.yml index 38c980334a..f2b44873e0 100644 --- a/.k8/hetzner-stage/ssv-node-55-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-55-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-55 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-56-deployment.yml b/.k8/hetzner-stage/ssv-node-56-deployment.yml index 98f2c38df2..bdb312eb7e 100644 --- a/.k8/hetzner-stage/ssv-node-56-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-56-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-56 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-57-deployment.yml b/.k8/hetzner-stage/ssv-node-57-deployment.yml index 4d4b7e3826..3eff5b03c4 100644 --- a/.k8/hetzner-stage/ssv-node-57-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-57-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-57 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml index 2984f489db..cdaf3bcb23 100644 --- a/.k8/hetzner-stage/ssv-node-58-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-58 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-59-deployment.yml b/.k8/hetzner-stage/ssv-node-59-deployment.yml index ababa32081..b7b1861792 100644 --- a/.k8/hetzner-stage/ssv-node-59-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-59-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-59 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-6-deployment.yml b/.k8/hetzner-stage/ssv-node-6-deployment.yml index c5bd225c4e..3bfbc7ed26 100644 --- a/.k8/hetzner-stage/ssv-node-6-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-6-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-6 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-60-deployment.yml b/.k8/hetzner-stage/ssv-node-60-deployment.yml index d2cc8d73f7..a7a7285a6d 100644 --- a/.k8/hetzner-stage/ssv-node-60-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-60-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-60 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-61-deployment.yml b/.k8/hetzner-stage/ssv-node-61-deployment.yml index 0558536fce..6ac244e496 100644 --- a/.k8/hetzner-stage/ssv-node-61-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-61-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: 
REPLACE_NAMESPACE labels: app: ssv-node-61 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-62-deployment.yml b/.k8/hetzner-stage/ssv-node-62-deployment.yml index 5652c92467..d257378b74 100644 --- a/.k8/hetzner-stage/ssv-node-62-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-62-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-62 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-63-deployment.yml b/.k8/hetzner-stage/ssv-node-63-deployment.yml index 2362357907..43912423b9 100644 --- a/.k8/hetzner-stage/ssv-node-63-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-63-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-63 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-64-deployment.yml b/.k8/hetzner-stage/ssv-node-64-deployment.yml index e44d042cdd..3a9f0fa5e0 100644 --- a/.k8/hetzner-stage/ssv-node-64-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-64-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-64 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-65-deployment.yml b/.k8/hetzner-stage/ssv-node-65-deployment.yml index c656cce830..837cff925a 100644 --- a/.k8/hetzner-stage/ssv-node-65-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-65-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-65 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-66-deployment.yml b/.k8/hetzner-stage/ssv-node-66-deployment.yml index 0791fd45c4..f76842606c 100644 --- a/.k8/hetzner-stage/ssv-node-66-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-66-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-66 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-67-deployment.yml b/.k8/hetzner-stage/ssv-node-67-deployment.yml index c57554c8a9..d9305f5293 100644 --- a/.k8/hetzner-stage/ssv-node-67-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-67-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-67 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-68-deployment.yml b/.k8/hetzner-stage/ssv-node-68-deployment.yml index b7ff0d0801..566fcdc221 100644 --- a/.k8/hetzner-stage/ssv-node-68-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-68-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-68 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-69-deployment.yml b/.k8/hetzner-stage/ssv-node-69-deployment.yml index aceb76acdc..29f507ea95 100644 --- a/.k8/hetzner-stage/ssv-node-69-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-69-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-69 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-7-deployment.yml b/.k8/hetzner-stage/ssv-node-7-deployment.yml index fce2676451..8493eb8870 100644 --- a/.k8/hetzner-stage/ssv-node-7-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-7-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-7 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git 
a/.k8/hetzner-stage/ssv-node-70-deployment.yml b/.k8/hetzner-stage/ssv-node-70-deployment.yml index 759d1dfa32..a501a099ed 100644 --- a/.k8/hetzner-stage/ssv-node-70-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-70-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-70 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-71-deployment.yml b/.k8/hetzner-stage/ssv-node-71-deployment.yml index 6e69a38a3a..b6c1bfa74b 100644 --- a/.k8/hetzner-stage/ssv-node-71-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-71-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-71 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-72-deployment.yml b/.k8/hetzner-stage/ssv-node-72-deployment.yml index f0a54d9fba..33c5a2d0ce 100644 --- a/.k8/hetzner-stage/ssv-node-72-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-72-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-72 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-8-deployment.yml b/.k8/hetzner-stage/ssv-node-8-deployment.yml index f2a09e9c8a..1f13447479 100644 --- a/.k8/hetzner-stage/ssv-node-8-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-8-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-8 + prometheus/app: ssv-node spec: type: ClusterIP ports: diff --git a/.k8/hetzner-stage/ssv-node-9-deployment.yml b/.k8/hetzner-stage/ssv-node-9-deployment.yml index 8f2626f59c..04f979521c 100644 --- a/.k8/hetzner-stage/ssv-node-9-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-9-deployment.yml @@ -6,6 +6,7 @@ metadata: namespace: REPLACE_NAMESPACE labels: app: ssv-node-9 + prometheus/app: ssv-node spec: type: ClusterIP ports: From 58dfed274b275911f0bdf599eb100ee0d3299f0c Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Mon, 6 Nov 2023 18:26:40 +0200 Subject: [PATCH 32/54] fix: force start duties in `ValidatorRegistrationRunner` (#1188) * fix: force start duties in `ValidatorRegistrationRunner` * update to spec PR * update spec JSONs --------- Co-authored-by: Lior Rutenberg --- go.mod | 5 ++++- go.sum | 4 ++-- protocol/v2/ssv/runner/runner.go | 5 +++-- protocol/v2/ssv/runner/validator_registration.go | 8 +------- protocol/v2/ssv/runner/voluntary_exit.go | 8 +------- protocol/v2/ssv/spectest/ssv_mapping_test.go | 12 +++++++----- 6 files changed, 18 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index b39d5e0cc9..dd15d92b49 100644 --- a/go.mod +++ b/go.mod @@ -223,5 +223,8 @@ replace github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 replace github.com/dgraph-io/ristretto => github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f -//TODO remove this replace when the following PR is merged https://github.com/bloxapp/eth2-key-manager/pull/100 +// TODO: remove this replace when the following PR is merged https://github.com/bloxapp/eth2-key-manager/pull/100 replace github.com/bloxapp/eth2-key-manager => github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 + +// TODO: remove this replace when the following PR is merged https://github.com/bloxapp/ssv-spec/pull +replace github.com/bloxapp/ssv-spec => github.com/moshe-blox/ssv-spec v0.0.0-20231105135956-a64e63f6e35a diff --git a/go.sum b/go.sum index cf4040a7be..4aabd85bc6 100644 --- a/go.sum +++ b/go.sum @@ -56,8 +56,6 @@ 
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHl github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 h1:vjrMmMH15Bo0QF+228CuEZvCI+OuPyJRco82Gj/WyTI= github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= -github.com/bloxapp/ssv-spec v0.3.3 h1:iNomqWQjxDDQouHMjl27PmH1hUolJ4u8QQ+HX/TQQcg= -github.com/bloxapp/ssv-spec v0.3.3/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= @@ -521,6 +519,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/moshe-blox/ssv-spec v0.0.0-20231105135956-a64e63f6e35a h1:I9d5JgkAFj3twwA3D7KkMxvp1wvMnkScmSGDvlf8J7o= +github.com/moshe-blox/ssv-spec v0.0.0-20231105135956-a64e63f6e35a/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index 1fc2225e15..1a06df1e28 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -271,9 +271,10 @@ func (b *BaseRunner) hasRunningDuty() bool { } func (b *BaseRunner) ShouldProcessDuty(duty *spectypes.Duty) error { - if b.QBFTController.Height >= specqbft.Height(duty.Slot) { + // assume StartingDuty is not nil if state is not nil + if b.State != nil && b.State.StartingDuty.Slot >= duty.Slot { return errors.Errorf("duty for slot %d already passed. Current height is %d", duty.Slot, - b.QBFTController.Height) + b.State.StartingDuty.Slot) } return nil } diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index 68bc4351b8..3232fdff3e 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -54,13 +54,7 @@ func NewValidatorRegistrationRunner( } func (r *ValidatorRegistrationRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - // Note: Unlike the other runners, this doesn't call BaseRunner.baseStartNewDuty because - // that requires a QBFTController which ValidatorRegistrationRunner doesn't have. 
- if r.HasRunningDuty() { - return errors.New("already running duty") - } - r.BaseRunner.baseSetupForNewDuty(duty) - return r.executeDuty(logger, duty) + return r.BaseRunner.baseStartNewDuty(logger, r, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go index 7eba30c616..b569f853cc 100644 --- a/protocol/v2/ssv/runner/voluntary_exit.go +++ b/protocol/v2/ssv/runner/voluntary_exit.go @@ -50,13 +50,7 @@ func NewVoluntaryExitRunner( } func (r *VoluntaryExitRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - // Note: Unlike the other runners, this doesn't call BaseRunner.baseStartNewDuty because - // that requires a QBFTController which VoluntaryExitRunner doesn't have. - if r.HasRunningDuty() { - return errors.New("already running duty") - } - r.BaseRunner.baseSetupForNewDuty(duty) - return r.executeDuty(logger, duty) + return r.BaseRunner.baseStartNewDuty(logger, r, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go index 14fac24b35..fae52f8e21 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -175,11 +175,13 @@ func newRunnerDutySpecTestFromMap(t *testing.T, m map[string]interface{}) *Start require.NoError(t, json.Unmarshal(byts, duty)) outputMsgs := make([]*spectypes.SignedPartialSignatureMessage, 0) - for _, msg := range m["OutputMessages"].([]interface{}) { - byts, _ = json.Marshal(msg) - typedMsg := &spectypes.SignedPartialSignatureMessage{} - require.NoError(t, json.Unmarshal(byts, typedMsg)) - outputMsgs = append(outputMsgs, typedMsg) + if v, ok := m["OutputMessages"].([]interface{}); ok { + for _, msg := range v { + byts, _ = json.Marshal(msg) + typedMsg := &spectypes.SignedPartialSignatureMessage{} + require.NoError(t, json.Unmarshal(byts, typedMsg)) + outputMsgs = append(outputMsgs, typedMsg) + } } ks := testingutils.KeySetForShare(&spectypes.Share{Quorum: uint64(baseRunnerMap["Share"].(map[string]interface{})["Quorum"].(float64))}) From d730048784eb66f9c58f3650ab040dec1916b1ff Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Mon, 6 Nov 2023 22:26:40 +0200 Subject: [PATCH 33/54] Fix initial metadata fetch (#1192) * Update operator/validator/controller.go Co-authored-by: rehs0y --------- Co-authored-by: rehs0y --- message/validation/errors.go | 2 +- operator/validator/controller.go | 46 +++++++++++++++++++------------- 2 files changed, 29 insertions(+), 19 deletions(-) diff --git a/message/validation/errors.go b/message/validation/errors.go index f27d3b4901..29d631675c 100644 --- a/message/validation/errors.go +++ b/message/validation/errors.go @@ -45,7 +45,7 @@ func (e Error) Text() string { var ( ErrEmptyData = Error{text: "empty data"} - ErrWrongDomain = Error{text: "wrong domain"} + ErrWrongDomain = Error{text: "wrong domain", silent: true} ErrNoShareMetadata = Error{text: "share has no metadata"} ErrUnknownValidator = Error{text: "unknown validator"} ErrValidatorLiquidated = Error{text: "validator is liquidated"} diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 0218c1862c..05182a9843 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -387,12 +387,38 @@ func (c *controller) 
StartValidators() { return } - shares := c.sharesStorage.List(nil, registrystorage.ByOperatorID(c.GetOperatorData().ID), registrystorage.ByNotLiquidated()) + shares := c.sharesStorage.List(nil, registrystorage.ByNotLiquidated()) if len(shares) == 0 { c.logger.Info("could not find validators") return } - c.setupValidators(shares) + + var ownShares []*ssvtypes.SSVShare + var allPubKeys = make([][]byte, 0, len(shares)) + ownOpID := c.GetOperatorData().ID + for _, share := range shares { + if share.BelongsToOperator(ownOpID) { + ownShares = append(ownShares, share) + } + allPubKeys = append(allPubKeys, share.ValidatorPubKey) + } + + // Start own validators. + c.setupValidators(ownShares) + + // Fetch metadata for all validators. + start := time.Now() + err := beaconprotocol.UpdateValidatorsMetadata(c.logger, allPubKeys, c, c.beacon, c.onMetadataUpdated) + if err != nil { + c.logger.Error("failed to update validators metadata after setup", + zap.Int("shares", len(allPubKeys)), + fields.Took(time.Since(start)), + zap.Error(err)) + } else { + c.logger.Debug("updated validators metadata after setup", + zap.Int("shares", len(allPubKeys)), + fields.Took(time.Since(start))) + } } // setupValidators setup and starts validators from the given shares. @@ -419,22 +445,6 @@ func (c *controller) setupValidators(shares []*ssvtypes.SSVShare) { c.logger.Info("setup validators done", zap.Int("map size", c.validatorsMap.Size()), zap.Int("failures", len(errs)), zap.Int("missing_metadata", len(fetchMetadata)), zap.Int("shares", len(shares)), zap.Int("started", started)) - - // Try to fetch metadata once for validators that don't have it. - if len(fetchMetadata) > 0 { - start := time.Now() - err := beaconprotocol.UpdateValidatorsMetadata(c.logger, fetchMetadata, c, c.beacon, c.onMetadataUpdated) - if err != nil { - c.logger.Error("failed to update validators metadata after setup", - zap.Int("shares", len(fetchMetadata)), - fields.Took(time.Since(start)), - zap.Error(err)) - } else { - c.logger.Debug("updated validators metadata after setup", - zap.Int("shares", len(fetchMetadata)), - fields.Took(time.Since(start))) - } - } } // setupNonCommitteeValidators trigger SyncHighestDecided for each validator From 593f27dbcf5575498bcf7450cc5ccafbc5aceef3 Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Mon, 6 Nov 2023 21:43:09 +0100 Subject: [PATCH 34/54] holesky testnet config (#1187) * Setup ci/cd for Holesky on prod * Commented ssv holesky nodes and boot node * holeskytestnet config --------- Co-authored-by: stoyan.peev --- .gitlab-ci.yml | 35 +++- .../holesky/boot-node-holesky-deployment.yml | 127 ++++++++++++ .../holesky/scripts/deploy-boot-nodes.sh | 128 ++++++++++++ .../holesky/scripts/deploy-cluster-1--4.sh | 131 +++++++++++++ .../holesky/scripts/deploy-exporters.sh | 118 ++++++++++++ .../ssv-full-node-holesky-deployment.yml | 182 ++++++++++++++++++ .../holesky/ssv-node-holesky-1-deployment.yml | 132 +++++++++++++ .../holesky/ssv-node-holesky-2-deployment.yml | 132 +++++++++++++ .../holesky/ssv-node-holesky-3-deployment.yml | 132 +++++++++++++ .../holesky/ssv-node-holesky-4-deployment.yml | 132 +++++++++++++ networkconfig/config.go | 1 + networkconfig/holesky.go | 22 +++ 12 files changed, 1262 insertions(+), 10 deletions(-) create mode 100644 .k8/production/holesky/boot-node-holesky-deployment.yml create mode 100755 .k8/production/holesky/scripts/deploy-boot-nodes.sh create mode 100755 .k8/production/holesky/scripts/deploy-cluster-1--4.sh create mode 100755 .k8/production/holesky/scripts/deploy-exporters.sh 
create mode 100644 .k8/production/holesky/ssv-full-node-holesky-deployment.yml create mode 100644 .k8/production/holesky/ssv-node-holesky-1-deployment.yml create mode 100644 .k8/production/holesky/ssv-node-holesky-2-deployment.yml create mode 100644 .k8/production/holesky/ssv-node-holesky-3-deployment.yml create mode 100644 .k8/production/holesky/ssv-node-holesky-4-deployment.yml create mode 100644 networkconfig/holesky.go diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2faf940701..2634628aaf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -136,11 +136,21 @@ Deploy nodes to prod: - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl - chmod +x ./kubectl - mv ./kubectl /usr/bin/kubectl + # +-------------------------------+ + # | 🟠 Deploy SSV Holesky nodes | + # +-------------------------------+ + - .k8/production/holesky/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 + # + # +-------------------------------+ + # │ 🟠 Deploy Holesky Bootnode | + # +-------------------------------+ + # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ + #- .k8/production/holesky/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # +---------------------------+ # | 🟠 Deploy SSV Prater nodes | # +---------------------------+ - - .k8/production/prater/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 + #- .k8/production/prater/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # +----------------------------+ # | 🔴 Deploy SSV Mainnet nodes | @@ -148,15 +158,15 @@ Deploy nodes to prod: # - .k8/production/mainnet/scripts/deploy-cluster-1-4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # - # +--------------------------+ + # +-----------------------------+ # │ 🟠 Deploy Prater Bootnode | - # +--------------------------+ + # +-----------------------------+ # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ # - .k8/production/prater/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # - # +---------------------------+ + # +------------------------------+ # │ 🔴 Deploy Mainnet Bootnode | - # +---------------------------+ + # +------------------------------+ # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ # - .k8/production/mainnet/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod 
ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 @@ -178,14 +188,19 @@ Deploy exporter to prod: - chmod +x ./kubectl - mv ./kubectl /usr/bin/kubectl # - # +---------------------------+ + # +-------------------------------+ + # | 🟠 Deploy Holesky exporter | + # +-------------------------------+ + - .k8/production/holesky/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT + # + # +------------------------------+ # | 🟠 Deploy Prater exporter | - # +---------------------------+ - - .k8/production/prater/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT + # +------------------------------+ + # - .k8/production/prater/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT # - # +---------------------------+ + # +------------------------------+ # │ 🔴 Deploy Mainnet exporter | - # +---------------------------+ + # +------------------------------+ # - .k8/production/mainnet/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT only: diff --git a/.k8/production/holesky/boot-node-holesky-deployment.yml b/.k8/production/holesky/boot-node-holesky-deployment.yml new file mode 100644 index 0000000000..daa89e7c9b --- /dev/null +++ b/.k8/production/holesky/boot-node-holesky-deployment.yml @@ -0,0 +1,127 @@ +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: boot-node-holesky + namespace: REPLACE_NAMESPACE +spec: + hosts: + - "boot-node-holesky.REPLACE_DOMAIN_SUFFIX" + gateways: + - boot-node-holesky + http: + - route: + - destination: + host: boot-node-holesky-svc + port: + number: 5003 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: boot-node-holesky + namespace: REPLACE_NAMESPACE +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "boot-node-holesky.REPLACE_DOMAIN_SUFFIX" +--- +apiVersion: v1 +kind: Service +metadata: + name: boot-node-holesky-svc + namespace: REPLACE_NAMESPACE + labels: + app: boot-node-holesky +spec: + type: ClusterIP + ports: + - port: 4003 + protocol: UDP + targetPort: 4003 + name: port-4003 + - port: 5003 + protocol: TCP + targetPort: 5003 + name: port-5003 + selector: + app: boot-node-holesky +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: boot-node-holesky + name: boot-node-holesky + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: boot-node-holesky + template: + metadata: + labels: + app: boot-node-holesky + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-boot-node + containers: + - name: boot-node-holesky + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + 
command: ["make", "start-boot-node"] + ports: + - containerPort: 5003 + name: port-5003 + hostPort: 5003 + env: + - name: CONFIG_PATH + value: /config/config.example.yaml + - name: BOOT_NODE_PRIVATE_KEY + valueFrom: + secretKeyRef: + name: config-secrets + key: boot_node_holesky_private_key + - name: BOOT_NODE_EXTERNAL_IP + valueFrom: + secretKeyRef: + name: config-secrets + key: boot_node_holesky_external_ip + - name: TCP_PORT + value: "5003" + - name: UDP_PORT + value: "4003" + volumeMounts: + - mountPath: /data/bootnode + name: boot-node-holesky + - mountPath: /data/config.yaml + name: boot-node-holesky-cm + volumes: + - name: boot-node-holesky + persistentVolumeClaim: + claimName: boot-node-holesky + - configMap: + defaultMode: 420 + name: boot-node-holesky-cm + name: boot-node-holesky-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/scripts/deploy-boot-nodes.sh b/.k8/production/holesky/scripts/deploy-boot-nodes.sh new file mode 100755 index 0000000000..bdf1f9dcff --- /dev/null +++ b/.k8/production/holesky/scripts/deploy-boot-nodes.sh @@ -0,0 +1,128 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/production/holesky" +DEPLOY_FILES=( + "boot-node-holesky-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/production/holesky/scripts/deploy-cluster-1--4.sh b/.k8/production/holesky/scripts/deploy-cluster-1--4.sh new file mode 100755 index 0000000000..cae1c6ca67 --- /dev/null +++ b/.k8/production/holesky/scripts/deploy-cluster-1--4.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/production/holesky" +DEPLOY_FILES=( + "ssv-node-holesky-1-deployment.yml" + "ssv-node-holesky-2-deployment.yml" + "ssv-node-holesky-3-deployment.yml" + "ssv-node-holesky-4-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/production/holesky/scripts/deploy-exporters.sh b/.k8/production/holesky/scripts/deploy-exporters.sh new file mode 100755 index 0000000000..794ab468b3 --- /dev/null +++ b/.k8/production/holesky/scripts/deploy-exporters.sh @@ -0,0 +1,118 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z ${9} ]]; then + echo "Please provide exporter cpu limit" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide exporter cpu limit" + exit 1 +fi + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +EXPORTER_CPU_LIMIT=$9 +EXPORTER_MEM_LIMIT=${10} + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $EXPORTER_CPU_LIMIT +echo $EXPORTER_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/production/holesky" +DEPLOY_FILES=( + "ssv-full-node-holesky-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ + -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/production/holesky/ssv-full-node-holesky-deployment.yml b/.k8/production/holesky/ssv-full-node-holesky-deployment.yml new file mode 100644 index 0000000000..dd61084842 --- /dev/null +++ b/.k8/production/holesky/ssv-full-node-holesky-deployment.yml @@ -0,0 +1,182 @@ +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: ssv-full-node-holesky-1 + namespace: REPLACE_NAMESPACE +spec: + hosts: + - "ws-ssv-full-node-holesky-1.REPLACE_DOMAIN_SUFFIX" + gateways: + - ssv-full-node-holesky-1 + http: + - route: + - destination: + host: ssv-full-node-holesky-1 + port: + number: 14021 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: ssv-full-node-holesky-1 + namespace: REPLACE_NAMESPACE +spec: + selector: + istio: ingressgateway-int + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "ws-ssv-full-node-holesky-1.REPLACE_DOMAIN_SUFFIX" +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-full-node-holesky-1 + namespace: REPLACE_NAMESPACE + labels: + app: ssv-full-node-holesky-1 +spec: + type: ClusterIP + ports: + - port: 12021 + protocol: UDP + targetPort: 12021 + name: port-12021 + - port: 13021 + protocol: TCP + targetPort: 13021 + name: port-13021 + - port: 14021 + protocol: TCP + targetPort: 14021 + name: port-14021 + - port: 15021 + protocol: TCP + targetPort: 15021 + name: port-15021 + - port: 16021 + protocol: TCP + targetPort: 16021 + name: port-16021 + selector: + app: ssv-full-node-holesky-1 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-full-node-holesky-1 + name: ssv-full-node-holesky-1 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-full-node-holesky-1 + template: + metadata: + labels: + app: ssv-full-node-holesky-1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-full-node-holesky-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: 
REPLACE_EXPORTER_CPU_LIMIT + memory: REPLACE_EXPORTER_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12021 + name: port-12021 + hostPort: 12021 + protocol: UDP + - containerPort: 13021 + name: port-13021 + hostPort: 13021 + - containerPort: 14021 + name: port-14021 + hostPort: 14021 + - containerPort: 15021 + name: port-15021 + hostPort: 15021 + - containerPort: 16021 + name: port-16021 + hostPort: 16021 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: METRICS_API_PORT + value: "15021" + - name: SSV_API_PORT + value: "16021" + - name: ENABLE_PROFILE + value: "true" + - name: UDP_PORT + value: "12021" + - name: TCP_PORT + value: "13021" + - name: WS_API_PORT + value: "14021" + - name: FULLNODE + value: "true" + - name: EXPORTER + value: "true" + - name: MSG_WORKERS_COUNT + value: "1024" + - name: MSG_WORKER_BUFFER_SIZE + value: "2048" + - name: SUBNETS + value: "0xffffffffffffffffffffffffffffffff" + - name: P2P_MAX_PEERS + value: "300" + volumeMounts: + - mountPath: /data + name: ssv-full-node-holesky-1 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-full-node-holesky-1-cm + volumes: + - name: ssv-full-node-holesky-1 + persistentVolumeClaim: + claimName: ssv-full-node-holesky-1 + - name: ssv-full-node-holesky-1-cm + configMap: + name: ssv-full-node-holesky-1-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/ssv-node-holesky-1-deployment.yml b/.k8/production/holesky/ssv-node-holesky-1-deployment.yml new file mode 100644 index 0000000000..bcf728d701 --- /dev/null +++ b/.k8/production/holesky/ssv-node-holesky-1-deployment.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-holesky-1-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-holesky-1 +spec: + type: ClusterIP + ports: + - port: 12022 + protocol: UDP + targetPort: 12022 + name: port-12022 + - port: 13022 + protocol: TCP + targetPort: 13022 + name: port-13022 + - port: 15022 + protocol: TCP + targetPort: 15022 + name: port-15022 + - port: 16022 + protocol: TCP + targetPort: 16022 + name: port-16022 + selector: + app: ssv-node-holesky-1 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-holesky-1 + name: ssv-node-holesky-1 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-holesky-1 + template: + metadata: + labels: + app: ssv-node-holesky-1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-node-holesky-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12022 + name: port-12022 + hostPort: 12022 + protocol: UDP + - containerPort: 13022 + name: port-13022 + hostPort: 13022 + - containerPort: 15022 + name: port-15022 + hostPort: 15022 + - containerPort: 
16022 + name: port-16022 + hostPort: 16022 + env: + - name: SHARE_CONFIG + value: "./data1/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: METRICS_API_PORT + value: "15022" + - name: SSV_API_PORT + value: "16022" + - name: ENABLE_PROFILE + value: "true" + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-holesky-1 + - mountPath: /data1/share.yaml + subPath: share.yaml + name: ssv-node-holesky-1-cm + volumes: + - name: ssv-node-holesky-1 + persistentVolumeClaim: + claimName: ssv-node-holesky-1 + - name: ssv-node-holesky-1-cm + configMap: + name: ssv-node-holesky-1-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/ssv-node-holesky-2-deployment.yml b/.k8/production/holesky/ssv-node-holesky-2-deployment.yml new file mode 100644 index 0000000000..824db2efed --- /dev/null +++ b/.k8/production/holesky/ssv-node-holesky-2-deployment.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-holesky-2-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-holesky-2 +spec: + type: ClusterIP + ports: + - port: 12023 + protocol: UDP + targetPort: 12023 + name: port-12023 + - port: 13023 + protocol: TCP + targetPort: 13023 + name: port-13023 + - port: 15023 + protocol: TCP + targetPort: 15023 + name: port-15023 + - port: 16023 + protocol: TCP + targetPort: 16023 + name: port-16023 + selector: + app: ssv-node-holesky-2 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-holesky-2 + name: ssv-node-holesky-2 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-holesky-2 + template: + metadata: + labels: + app: ssv-node-holesky-2 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-node-holesky-2 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12023 + name: port-12023 + hostPort: 12023 + protocol: UDP + - containerPort: 13023 + name: port-13023 + hostPort: 13023 + - containerPort: 15023 + name: port-15023 + hostPort: 15023 + - containerPort: 16023 + name: port-16023 + hostPort: 16023 + env: + - name: SHARE_CONFIG + value: "./data1/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: METRICS_API_PORT + value: "15023" + - name: SSV_API_PORT + value: "16023" + - name: ENABLE_PROFILE + value: "true" + - name: BUILDER_PROPOSALS + value: "true" +
volumeMounts: + - mountPath: /data + name: ssv-node-holesky-2 + - mountPath: /data1/share.yaml + subPath: share.yaml + name: ssv-node-holesky-2-cm + volumes: + - name: ssv-node-holesky-2 + persistentVolumeClaim: + claimName: ssv-node-holesky-2 + - name: ssv-node-holesky-2-cm + configMap: + name: ssv-node-holesky-2-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/ssv-node-holesky-3-deployment.yml b/.k8/production/holesky/ssv-node-holesky-3-deployment.yml new file mode 100644 index 0000000000..0104fc6ee3 --- /dev/null +++ b/.k8/production/holesky/ssv-node-holesky-3-deployment.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-holesky-3-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-holesky-3 +spec: + type: ClusterIP + ports: + - port: 12024 + protocol: UDP + targetPort: 12024 + name: port-12024 + - port: 13024 + protocol: TCP + targetPort: 13024 + name: port-13024 + - port: 15024 + protocol: TCP + targetPort: 15024 + name: port-15024 + - port: 16024 + protocol: TCP + targetPort: 16024 + name: port-16024 + selector: + app: ssv-node-holesky-3 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-holesky-3 + name: ssv-node-holesky-3 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-holesky-3 + template: + metadata: + labels: + app: ssv-node-holesky-3 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-node-holesky-3 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12024 + name: port-12024 + hostPort: 12024 + protocol: UDP + - containerPort: 13024 + name: port-13024 + hostPort: 13024 + - containerPort: 15024 + name: port-15024 + hostPort: 15024 + - containerPort: 16024 + name: port-16024 + hostPort: 16024 + env: + - name: SHARE_CONFIG + value: "./data1/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: METRICS_API_PORT + value: "15024" + - name: SSV_API_PORT + value: "16024" + - name: ENABLE_PROFILE + value: "true" + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-holesky-3 + - mountPath: /data1/share.yaml + subPath: share.yaml + name: ssv-node-holesky-3-cm + volumes: + - name: ssv-node-holesky-3 + persistentVolumeClaim: + claimName: ssv-node-holesky-3 + - name: ssv-node-holesky-3-cm + configMap: + name: ssv-node-holesky-3-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/ssv-node-holesky-4-deployment.yml b/.k8/production/holesky/ssv-node-holesky-4-deployment.yml new file mode 100644 index 0000000000..1b454b7814 --- /dev/null +++ 
b/.k8/production/holesky/ssv-node-holesky-4-deployment.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-holesky-4-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-holesky-4 +spec: + type: ClusterIP + ports: + - port: 12025 + protocol: UDP + targetPort: 12025 + name: port-12025 + - port: 13025 + protocol: TCP + targetPort: 13025 + name: port-13025 + - port: 15025 + protocol: TCP + targetPort: 15025 + name: port-15025 + - port: 16025 + protocol: TCP + targetPort: 16025 + name: port-16025 + selector: + app: ssv-node-holesky-4 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-holesky-4 + name: ssv-node-holesky-4 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-holesky-4 + template: + metadata: + labels: + app: ssv-node-holesky-4 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-node-holesky-4 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12025 + name: port-12025 + hostPort: 12025 + protocol: UDP + - containerPort: 13025 + name: port-13025 + hostPort: 13025 + - containerPort: 15025 + name: port-15025 + hostPort: 15025 + - containerPort: 16025 + name: port-16025 + hostPort: 16025 + env: + - name: SHARE_CONFIG + value: "./data1/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: METRICS_API_PORT + value: "15025" + - name: SSV_API_PORT + value: "16025" + - name: ENABLE_PROFILE + value: "true" + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-holesky-4 + - mountPath: /data1/share.yaml + subPath: share.yaml + name: ssv-node-holesky-4-cm + volumes: + - name: ssv-node-holesky-4 + persistentVolumeClaim: + claimName: ssv-node-holesky-4 + - name: ssv-node-holesky-4-cm + configMap: + name: ssv-node-holesky-4-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/networkconfig/config.go b/networkconfig/config.go index a4791e878e..d2981c2c41 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -14,6 +14,7 @@ import ( var SupportedConfigs = map[string]NetworkConfig{ Mainnet.Name: Mainnet, + Holesky.Name: Holesky, HoleskyStage.Name: HoleskyStage, JatoV2Stage.Name: JatoV2Stage, JatoV2.Name: JatoV2, diff --git a/networkconfig/holesky.go b/networkconfig/holesky.go new file mode 100644 index 0000000000..ab5ded8293 --- /dev/null +++ b/networkconfig/holesky.go @@ -0,0 +1,22 @@ +package networkconfig + +import ( + "math/big" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" +) + +var Holesky = NetworkConfig{ + Name: "holesky", + Beacon: beacon.NewNetwork(spectypes.HoleskyNetwork), + Domain: 
spectypes.DomainType{0x0, 0x0, 0x5, 0x1}, + GenesisEpoch: 1, + RegistrySyncOffset: new(big.Int).SetInt64(181612), + RegistryContractAddr: "0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA", + Bootnodes: []string{ + "enr:-Li4QFIQzamdvTxGJhvcXG_DFmCeyggSffDnllY5DiU47pd_K_1MRnSaJimWtfKJ-MD46jUX9TwgW5Jqe0t4pH41RYWGAYuFnlyth2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQN4v-N9zFYwEqzGPBBX37q24QPFvAVUtokIo1fblIsmTIN0Y3CCE4uDdWRwgg-j", + }, + WhitelistedOperatorKeys: []string{}, +} From f4d3401cd793956964e0f67e6d12cbf1fec55c67 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Mon, 6 Nov 2023 22:52:53 +0200 Subject: [PATCH 35/54] fix: missing domain type in `setupDiscovery` (#1194) * fix: missing domain type in `setupDiscovery` --- .gitlab-ci.yml | 1 - network/p2p/p2p_setup.go | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2634628aaf..627b5f0e8c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -205,4 +205,3 @@ Deploy exporter to prod: only: - main - diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index c0cb09daf1..7a73901935 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -262,6 +262,7 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error { SubnetsIdx: n.idx, HostAddress: n.cfg.HostAddress, HostDNS: n.cfg.HostDNS, + DomainType: n.cfg.Network.Domain, } disc, err := discovery.NewService(n.ctx, logger, discOpts) if err != nil { From fe35f738c58b3c9784c7a0280f600958c3947c5c Mon Sep 17 00:00:00 2001 From: moshe-blox Date: Mon, 6 Nov 2023 23:05:13 +0200 Subject: [PATCH 36/54] refactor: remove domain type discovery logs --- network/discovery/dv5_service.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index 7c1c58150f..91ce4e340a 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -3,7 +3,6 @@ package discovery import ( "bytes" "context" - "encoding/hex" "fmt" "net" "sync/atomic" @@ -152,13 +151,9 @@ func (dvs *DiscV5Service) checkPeer(logger *zap.Logger, e PeerEvent) error { // TODO: uncomment errors once there are sufficient nodes with domain type. nodeDomainType, err := records.GetDomainTypeEntry(e.Node.Record()) if err != nil { - // return fmt.Errorf("could not read domain type: %w", err) - logger.Debug("could not read domain type entry", zap.Error(err)) + // TODO: skip missing domain type (likely old node). } else if nodeDomainType != dvs.domainType { - // return errors.New("different domain type") - logger.Debug("skipping different domain type entry", zap.String("domain_type", hex.EncodeToString(nodeDomainType[:]))) - } else { - logger.Debug("discovered node with matching domain type", zap.String("domain_type", hex.EncodeToString(nodeDomainType[:]))) + // TODO: skip different domain type. } // Get the peer's subnets, skipping if it has none. 
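Note on the hunk above: the commented-out returns it deletes spell out the strict behavior the TODOs defer, namely rejecting peers whose ENR carries no domain type entry or a different one. Below is a minimal sketch of how that check inside checkPeer could look once enforcement is switched on, assuming the records and PeerEvent APIs stay as they appear here; it is illustrative only, not a patch in this series.

    // Hypothetical strict domain-type filter for checkPeer (not committed code).
    nodeDomainType, err := records.GetDomainTypeEntry(e.Node.Record())
    if err != nil {
        // No domain type entry in the ENR, likely a node on an older version.
        return fmt.Errorf("could not read domain type: %w", err)
    }
    if nodeDomainType != dvs.domainType {
        // The peer advertises a different SSV network domain, so skip it.
        return errors.New("different domain type")
    }

Until then, both branches deliberately fall through so that peers running older node versions remain discoverable.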
From 2e08af76f31361c95c7ad7787e27d24283600bd1 Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Tue, 7 Nov 2023 15:08:51 +0100 Subject: [PATCH 37/54] spec align (#1197) * spec align 0.3.4 * bump ekm version --- go.mod | 10 ++-------- go.sum | 8 ++++---- protocol/v2/ssv/runner/runner.go | 19 ++++++++++++++++++- .../v2/ssv/runner/validator_registration.go | 2 +- protocol/v2/ssv/runner/voluntary_exit.go | 2 +- 5 files changed, 26 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index dd15d92b49..cb1cfef274 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.20 require ( github.com/aquasecurity/table v1.8.0 github.com/attestantio/go-eth2-client v0.16.3 - github.com/bloxapp/eth2-key-manager v1.3.1 - github.com/bloxapp/ssv-spec v0.3.3 + github.com/bloxapp/eth2-key-manager v1.3.2 + github.com/bloxapp/ssv-spec v0.3.4 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/xxhash/v2 v2.2.0 github.com/cornelk/hashmap v1.0.8 @@ -222,9 +222,3 @@ require ( replace github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 replace github.com/dgraph-io/ristretto => github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f - -// TODO: remove this replace when the following PR is merged https://github.com/bloxapp/eth2-key-manager/pull/100 -replace github.com/bloxapp/eth2-key-manager => github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 - -// TODO: remove this replace when the following PR is merged https://github.com/bloxapp/ssv-spec/pull -replace github.com/bloxapp/ssv-spec => github.com/moshe-blox/ssv-spec v0.0.0-20231105135956-a64e63f6e35a diff --git a/go.sum b/go.sum index 4aabd85bc6..cd46217e19 100644 --- a/go.sum +++ b/go.sum @@ -54,8 +54,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 h1:vjrMmMH15Bo0QF+228CuEZvCI+OuPyJRco82Gj/WyTI= -github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= +github.com/bloxapp/eth2-key-manager v1.3.2 h1:xzxwYQZr8DoQrkCBkTnSdDWqqoPq/iy5VoKLxfPf4IY= +github.com/bloxapp/eth2-key-manager v1.3.2/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= +github.com/bloxapp/ssv-spec v0.3.4 h1:uu1pAP8FBucGf1FGORjzqz7if0vWGRY5w6ILLhA7IuM= +github.com/bloxapp/ssv-spec v0.3.4/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= @@ -519,8 +521,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= -github.com/moshe-blox/ssv-spec v0.0.0-20231105135956-a64e63f6e35a h1:I9d5JgkAFj3twwA3D7KkMxvp1wvMnkScmSGDvlf8J7o= -github.com/moshe-blox/ssv-spec 
v0.0.0-20231105135956-a64e63f6e35a/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index 1a06df1e28..ff833d8c31 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -104,6 +104,15 @@ func (b *BaseRunner) baseStartNewDuty(logger *zap.Logger, runner Runner, duty *s return runner.executeDuty(logger, duty) } +// baseStartNewNonBeaconDuty is a base func that all runner implementation can call to start a non-beacon duty +func (b *BaseRunner) baseStartNewNonBeaconDuty(logger *zap.Logger, runner Runner, duty *spectypes.Duty) error { + if err := b.ShouldProcessNonBeaconDuty(duty); err != nil { + return errors.Wrap(err, "can't start non-beacon duty") + } + b.baseSetupForNewDuty(duty) + return runner.executeDuty(logger, duty) +} + // basePreConsensusMsgProcessing is a base func that all runner implementation can call for processing a pre-consensus msg func (b *BaseRunner) basePreConsensusMsgProcessing(runner Runner, signedMsg *spectypes.SignedPartialSignatureMessage) (bool, [][32]byte, error) { if err := b.ValidatePreConsensusMsg(runner, signedMsg); err != nil { @@ -271,9 +280,17 @@ func (b *BaseRunner) hasRunningDuty() bool { } func (b *BaseRunner) ShouldProcessDuty(duty *spectypes.Duty) error { + if b.QBFTController.Height >= specqbft.Height(duty.Slot) && b.QBFTController.Height != 0 { + return errors.Errorf("duty for slot %d already passed. Current height is %d", duty.Slot, + b.QBFTController.Height) + } + return nil +} + +func (b *BaseRunner) ShouldProcessNonBeaconDuty(duty *spectypes.Duty) error { // assume StartingDuty is not nil if state is not nil if b.State != nil && b.State.StartingDuty.Slot >= duty.Slot { - return errors.Errorf("duty for slot %d already passed. Current height is %d", duty.Slot, + return errors.Errorf("duty for slot %d already passed. 
Current slot is %d", duty.Slot, b.State.StartingDuty.Slot) } return nil diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index 3232fdff3e..f12dae2f08 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -54,7 +54,7 @@ func NewValidatorRegistrationRunner( } func (r *ValidatorRegistrationRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - return r.BaseRunner.baseStartNewDuty(logger, r, duty) + return r.BaseRunner.baseStartNewNonBeaconDuty(logger, r, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go index b569f853cc..147e5f1471 100644 --- a/protocol/v2/ssv/runner/voluntary_exit.go +++ b/protocol/v2/ssv/runner/voluntary_exit.go @@ -50,7 +50,7 @@ func NewVoluntaryExitRunner( } func (r *VoluntaryExitRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - return r.BaseRunner.baseStartNewDuty(logger, r, duty) + return r.BaseRunner.baseStartNewNonBeaconDuty(logger, r, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) From 730980717d4c56d232ca9922fe499b4efedadb49 Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Tue, 7 Nov 2023 15:23:53 +0100 Subject: [PATCH 38/54] Stage (#1199) * refactor: remove domain type discovery logs * spec align (#1197) * spec align 0.3.4 * bump ekm version --------- Co-authored-by: moshe-blox --- go.mod | 10 ++-------- go.sum | 8 ++++---- network/discovery/dv5_service.go | 9 ++------- protocol/v2/ssv/runner/runner.go | 19 ++++++++++++++++++- .../v2/ssv/runner/validator_registration.go | 2 +- protocol/v2/ssv/runner/voluntary_exit.go | 2 +- 6 files changed, 28 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index dd15d92b49..cb1cfef274 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.20 require ( github.com/aquasecurity/table v1.8.0 github.com/attestantio/go-eth2-client v0.16.3 - github.com/bloxapp/eth2-key-manager v1.3.1 - github.com/bloxapp/ssv-spec v0.3.3 + github.com/bloxapp/eth2-key-manager v1.3.2 + github.com/bloxapp/ssv-spec v0.3.4 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/xxhash/v2 v2.2.0 github.com/cornelk/hashmap v1.0.8 @@ -222,9 +222,3 @@ require ( replace github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 replace github.com/dgraph-io/ristretto => github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f - -// TODO: remove this replace when the following PR is merged https://github.com/bloxapp/eth2-key-manager/pull/100 -replace github.com/bloxapp/eth2-key-manager => github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 - -// TODO: remove this replace when the following PR is merged https://github.com/bloxapp/ssv-spec/pull -replace github.com/bloxapp/ssv-spec => github.com/moshe-blox/ssv-spec v0.0.0-20231105135956-a64e63f6e35a diff --git a/go.sum b/go.sum index 4aabd85bc6..cd46217e19 100644 --- a/go.sum +++ b/go.sum @@ -54,8 +54,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= 
-github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 h1:vjrMmMH15Bo0QF+228CuEZvCI+OuPyJRco82Gj/WyTI= -github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= +github.com/bloxapp/eth2-key-manager v1.3.2 h1:xzxwYQZr8DoQrkCBkTnSdDWqqoPq/iy5VoKLxfPf4IY= +github.com/bloxapp/eth2-key-manager v1.3.2/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= +github.com/bloxapp/ssv-spec v0.3.4 h1:uu1pAP8FBucGf1FGORjzqz7if0vWGRY5w6ILLhA7IuM= +github.com/bloxapp/ssv-spec v0.3.4/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= @@ -519,8 +521,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= -github.com/moshe-blox/ssv-spec v0.0.0-20231105135956-a64e63f6e35a h1:I9d5JgkAFj3twwA3D7KkMxvp1wvMnkScmSGDvlf8J7o= -github.com/moshe-blox/ssv-spec v0.0.0-20231105135956-a64e63f6e35a/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index 7c1c58150f..91ce4e340a 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -3,7 +3,6 @@ package discovery import ( "bytes" "context" - "encoding/hex" "fmt" "net" "sync/atomic" @@ -152,13 +151,9 @@ func (dvs *DiscV5Service) checkPeer(logger *zap.Logger, e PeerEvent) error { // TODO: uncomment errors once there are sufficient nodes with domain type. nodeDomainType, err := records.GetDomainTypeEntry(e.Node.Record()) if err != nil { - // return fmt.Errorf("could not read domain type: %w", err) - logger.Debug("could not read domain type entry", zap.Error(err)) + // TODO: skip missing domain type (likely old node). } else if nodeDomainType != dvs.domainType { - // return errors.New("different domain type") - logger.Debug("skipping different domain type entry", zap.String("domain_type", hex.EncodeToString(nodeDomainType[:]))) - } else { - logger.Debug("discovered node with matching domain type", zap.String("domain_type", hex.EncodeToString(nodeDomainType[:]))) + // TODO: skip different domain type. } // Get the peer's subnets, skipping if it has none. 
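Note: the runner.go hunk that follows splits duty gating in two: beacon duties are rejected once the QBFT controller's height has reached their slot, while non-beacon duties (validator registration, voluntary exit) are rejected based on the slot of the last duty started, since they never run QBFT consensus. A minimal, self-contained sketch of the two checks (types simplified; the real methods are ShouldProcessDuty and ShouldProcessNonBeaconDuty on BaseRunner, shown below):

package main

import "fmt"

// Slot stands in for the spec slot/height types; in runner.go the height check
// reads b.QBFTController.Height and the slot check reads b.State.StartingDuty.Slot.
type Slot uint64

// shouldProcessDuty mirrors the beacon-duty gate: reject once consensus height
// has caught up to the duty's slot (height 0 means no instance ran yet).
func shouldProcessDuty(qbftHeight, dutySlot Slot) error {
	if qbftHeight >= dutySlot && qbftHeight != 0 {
		return fmt.Errorf("duty for slot %d already passed. Current height is %d", dutySlot, qbftHeight)
	}
	return nil
}

// shouldProcessNonBeaconDuty mirrors the non-beacon gate: reject if a duty at
// the same or a later slot has already started.
func shouldProcessNonBeaconDuty(startingDutySlot, dutySlot Slot) error {
	if startingDutySlot >= dutySlot {
		return fmt.Errorf("duty for slot %d already passed. Current slot is %d", dutySlot, startingDutySlot)
	}
	return nil
}

func main() {
	fmt.Println(shouldProcessDuty(100, 99))         // rejected: consensus already reached this slot
	fmt.Println(shouldProcessNonBeaconDuty(98, 99)) // <nil>: no later non-beacon duty has started
}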
diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index 1a06df1e28..ff833d8c31 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -104,6 +104,15 @@ func (b *BaseRunner) baseStartNewDuty(logger *zap.Logger, runner Runner, duty *s return runner.executeDuty(logger, duty) } +// baseStartNewNonBeaconDuty is a base func that all runner implementation can call to start a non-beacon duty +func (b *BaseRunner) baseStartNewNonBeaconDuty(logger *zap.Logger, runner Runner, duty *spectypes.Duty) error { + if err := b.ShouldProcessNonBeaconDuty(duty); err != nil { + return errors.Wrap(err, "can't start non-beacon duty") + } + b.baseSetupForNewDuty(duty) + return runner.executeDuty(logger, duty) +} + // basePreConsensusMsgProcessing is a base func that all runner implementation can call for processing a pre-consensus msg func (b *BaseRunner) basePreConsensusMsgProcessing(runner Runner, signedMsg *spectypes.SignedPartialSignatureMessage) (bool, [][32]byte, error) { if err := b.ValidatePreConsensusMsg(runner, signedMsg); err != nil { @@ -271,9 +280,17 @@ func (b *BaseRunner) hasRunningDuty() bool { } func (b *BaseRunner) ShouldProcessDuty(duty *spectypes.Duty) error { + if b.QBFTController.Height >= specqbft.Height(duty.Slot) && b.QBFTController.Height != 0 { + return errors.Errorf("duty for slot %d already passed. Current height is %d", duty.Slot, + b.QBFTController.Height) + } + return nil +} + +func (b *BaseRunner) ShouldProcessNonBeaconDuty(duty *spectypes.Duty) error { // assume StartingDuty is not nil if state is not nil if b.State != nil && b.State.StartingDuty.Slot >= duty.Slot { - return errors.Errorf("duty for slot %d already passed. Current height is %d", duty.Slot, + return errors.Errorf("duty for slot %d already passed. 
Current slot is %d", duty.Slot, b.State.StartingDuty.Slot) } return nil diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index 3232fdff3e..f12dae2f08 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -54,7 +54,7 @@ func NewValidatorRegistrationRunner( } func (r *ValidatorRegistrationRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - return r.BaseRunner.baseStartNewDuty(logger, r, duty) + return r.BaseRunner.baseStartNewNonBeaconDuty(logger, r, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go index b569f853cc..147e5f1471 100644 --- a/protocol/v2/ssv/runner/voluntary_exit.go +++ b/protocol/v2/ssv/runner/voluntary_exit.go @@ -50,7 +50,7 @@ func NewVoluntaryExitRunner( } func (r *VoluntaryExitRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - return r.BaseRunner.baseStartNewDuty(logger, r, duty) + return r.BaseRunner.baseStartNewNonBeaconDuty(logger, r, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) From 1eefbd9487b9deb4fde2b588705347fee0d07c47 Mon Sep 17 00:00:00 2001 From: Nikita Kryuchkov Date: Thu, 9 Nov 2023 16:10:25 +0300 Subject: [PATCH 39/54] RSA message encryption in msg validation (#1190) * libp2p RSA PoC * Deploy to 1-4 * Attempt to fix decoding operator private key * Enable signing in pubsub * Enable pubsub trace for 1-4 * Deploy to 5-8 * Enable pubsub trace for 5-8 * Revert "Enable pubsub trace for 5-8" This reverts commit 7c7e52d686084356812b5b5b0a8f655971f9414a. * Revert "Enable pubsub trace for 1-4" This reverts commit 6d4148ddd65e28045fb8625af75e7dea7fc21d65. * Revert "Enable signing in pubsub" This reverts commit 6fea7ac420ad0a81563a4b81a2571326ebcd6090. * feat: ignore messages during handshake * New domain type for 1-8 * Enable signing in pubsub * Enable pubsub trace for 1-4 * Enable pubsub trace for 5-8 * Change DB name * Minor refactoring * Revert "Enable pubsub trace for 5-8" This reverts commit 77c7a6032e6c5e935402ef8fd734509e487ceb60. * Revert "Enable pubsub trace for 1-4" This reverts commit e8046c5b9c59acd623fc111ba4f004f75b86d3bd. * Revert "Enable signing in pubsub" This reverts commit 3e11e6e4284fa3b09ed5a7f603ac5df34b18c386. * Revert "Change DB name" This reverts commit a25e21d5f786204ba40d5b02922b99e9a67f3d9e. * Revert "New domain type for 1-8" This reverts commit 19ac2d9372e051fbe71a9ca482ff4c096c3d1577. * Added deployment for boot-node-3 * Added deployment for test boot-node-3 * Deploy boot-node-3 * Added CONFIG_PATH env * Enable pubsub trace for 1-4 * Enable pubsub trace for 5-8 * Change DB name * New domain type for 1-8 * Enable signing in pubsub * Change bootnode * Remove exporter nodes * Revert "Enable signing in pubsub" This reverts commit 833e693994512798016b1e160e8a015f0d3bc6e1. * Revert "Remove exporter nodes" This reverts commit dba472e0c386df6eb2633384ae7467372cb92221. * Revert "Change bootnode" This reverts commit 11670609f3dd90713f1b5c3ced4289b974cb59c0. * Revert "New domain type for 1-8" This reverts commit eb70da75d33fdef2e29c3ea59cef307a2a43f190. * Revert "Change DB name" This reverts commit 26d01f87e40adaa88e912ec62469f4d6d5b399ac. * Revert "Enable pubsub trace for 5-8" This reverts commit a76de6fb6bc626639d156665164178f017cd3c86. 
* Revert "Enable pubsub trace for 1-4" This reverts commit 4bd683c9e651392634bf6aff2c799c712ef60f93. * Change DB name * New domain type for 1-8 * Change bootnode * Revert "Change bootnode" This reverts commit 51ec5f8994fc52dd510f35326d2d2a3655f918a5. * Revert "New domain type for 1-8" This reverts commit 435b414a1137155878cc93de14be3a2f975df3e7. * Revert "Change DB name" This reverts commit e96a5b34f85a6e3c1055a433aebb1ace34b42ec1. * Change bootnode on jato-v2-stage * Revert "Change bootnode on jato-v2-stage" This reverts commit 9f842f119ed0f5d25f0d3e93f8344aede1c6bd93. * Stop nodes * Revert "Deploy to 5-8" This reverts commit 0832e0dd2d0af99a15231df1dd8efb40c3bbac13. * Revert "Stop nodes" This reverts commit bf9af43e22256110647393518d034679346000cc. * Enable signing in pubsub * Revert "Enable signing in pubsub" This reverts commit 13711bc1eefaa7bd8aad73cfe5edb0df7ccbd44b. * Enable signing in pubsub * Deploy to 5-8 * Revert "Enable signing in pubsub" This reverts commit f6146885dfda010781bfb6251a13ac9bb4d97663. * Add signature policy log * Deploy only to 5-8, change branch name * Message encryption * Disable deployment * fmt * Deploy 9-12 & 17-72 * Fix decoding panic * Don't ignore not connected peers * Attempt to fix key in p2p * Disable deployment to 69-72 * Disable non-RSA signature verification * Break message data to make sure verification works * Revert "Break message data to make sure verification works" This reverts commit 51f318ba24cdcd4d488272f2c4802aee17a59787. * Deploy to all nodes * Revert "Disable non-RSA signature verification" This reverts commit 5ae7de4617277bbd625539b465fcfc9f8c538408. * Validate signatures in message validation * Cancel deploy to 13-16 * Check BLS only in commits * Revert "Validate signatures in message validation" This reverts commit a9da254112c90409789ac8d89d02d9ad38f9830c. * Validate signatures in message validation * Check BLS only in committee decided * Check non-committee signatures in exporter * Enable BLS in protocol, disable in message validation * Don't verify signature in IsProposalJustification * Revert "Don't verify signature in IsProposalJustification" This reverts commit 0f06e116d1e0d76692785d9e737f4e235a4dcf29. * Revert "Enable BLS in protocol, disable in message validation" This reverts commit a230d7c8d41f3388e8f0322e39e9302394e8929c. * Attempt to make signing correct * ignore signatures when computing libp2p msg ids * add metric * Add a TODO * Fix metricPubsubInbound * Get rid of BLS in message validation * fix metric name * fix metric * Signing benchmarks * RSA benchmarks * Use operator ID instead of public key * Revert "Use operator ID instead of public key" This reverts commit 5a2ee21f1e62cfad35d1301384d88e9c7afa421e. * Use operator ID instead of public key * Attempt to fix public key parsing * Delete redundant files * Cleanup * Cache RSA public keys * Init map * Cleanup * Get rid of JSON encoding of signed SSV messages * Cleanup * Fix typo * add forking for rsa message encryption on holesky * Attempt to test fork * Avoid deploying to 41--44 * Revert "Avoid deploying to 41--44" This reverts commit 790163e26427f04ed8b6a75f6e3f151e79d95552. * Update fork epoch * Update fork epoch * Attempt to fix * More logs * Update fork epoch * Disable logs * Move messageData assignment * Temporarily simplify RSAMessageFork * More simplification of RSAMessageFork * Revert "More simplification of RSAMessageFork" This reverts commit e2f556f03051174e29f66e7f5ea1be18970c55da. 
* Revert "Temporarily simplify RSAMessageFork" This reverts commit 6bc197c7ffe71b7256496dd3dd5a7d4d5a7df22e. * Revert "Move messageData assignment" This reverts commit a7a4557c49d9f2cfc1ecb1955cde6801059e688a. * Revert "Disable logs" This reverts commit 7580d836751aac684182c158d2f5a427a213910f. * Disable logs * Move messageData assignment * Revert "Move messageData assignment" This reverts commit c6f760c0d46f71ef1a3ba831724f98226164cb63. * Temporarily simplify RSAMessageFork * More simplification of RSAMessageFork * Update epoch * Update epoch * Update epoch * Comment out encryption * Revert "Comment out encryption" This reverts commit 9ff54298546c3071445b3e629b33759bb5f8a34c. * Comment out encryption * Minor cleanup * Revert "Comment out encryption" This reverts commit 8e7369b140a46a030e285b7425481b3b64c5d686. * Attempt to fix message ID * Update epoch * Network config cleanup * Revert important log removal * Fix some tests * Cleanup * Fix linter * Nested signature verification * Add unit tests * Fix linter * Revert deployment * Fix TestMultipleSlotTickers * Rename a variable * Increase TestNetworkRSAForkEpoch * Attempt to fix tests * Add operator ID to sig verification err message * change encoding func to allocate less * RSAMessageFork config value instead of func * rename caches to clearer names * linting * fix values for test * Change newQBFTConfig signature * go mod tidy * Deploy to all except 13-16 * Revert "Deploy to all except 13-16" This reverts commit 543c0b5c2ff2d90ae0d69709ea621b46e77f8922. * Deploy to all * Revert "Deploy to all" This reverts commit ebb6db8d842745ee7364876b121e6889fca1ccc7. * Deploy to all * Revert "Deploy to all" This reverts commit 9658443b850ed00f34c905f93e0065942d4ded95. * Delete unused struct * Change mainnet fork epoch temporary value * Rename OperatorPublicKey to OperatorPubKeyHash --------- Co-authored-by: moshe-blox Co-authored-by: stoyan.peev Co-authored-by: y0sher --- cli/operator/node.go | 34 +- go.mod | 1 + go.sum | 2 + identity/store_test.go | 6 +- integration/qbft/tests/scenario_test.go | 5 +- message/validation/consensus_state.go | 34 + message/validation/consensus_validation.go | 13 +- message/validation/errors.go | 5 +- message/validation/partial_validation.go | 67 +- message/validation/qbft_config.go | 11 +- message/validation/rsa.go | 57 ++ message/validation/validation.go | 104 ++- message/validation/validation_test.go | 606 +++++++++++++----- network/commons/common.go | 30 + network/commons/keys.go | 26 +- network/discovery/dv5_service.go | 2 +- network/discovery/enode.go | 5 +- network/discovery/enode_test.go | 2 +- network/p2p/config.go | 12 +- network/p2p/metrics.go | 6 +- network/p2p/p2p.go | 34 +- network/p2p/p2p_pubsub.go | 19 +- network/p2p/p2p_setup.go | 6 +- network/p2p/p2p_sync.go | 20 + network/p2p/p2p_test.go | 55 ++ network/p2p/test_utils.go | 35 +- network/peers/index.go | 5 +- network/peers/scores_test.go | 2 +- network/peers/subnets_test.go | 2 +- network/records/subnets_test.go | 2 +- network/topics/controller.go | 4 +- network/topics/controller_test.go | 2 +- network/topics/msg_id.go | 53 +- network/topics/msg_validator_test.go | 2 +- networkconfig/config.go | 1 + networkconfig/holesky-stage.go | 1 + networkconfig/jato-v2.go | 1 + networkconfig/local-testnet.go | 3 + networkconfig/mainnet.go | 2 + networkconfig/test-network.go | 1 + operator/slotticker/slotticker_test.go | 6 +- operator/validator/controller.go | 6 +- operator/validator/router.go | 3 - .../ssv/validator/non_committee_validator.go | 2 +- 
protocol/v2/ssv/validator/opts.go | 1 - .../types/signature_benchmark_linux_test.go | 70 ++ protocol/v2/types/signature_benchmark_test.go | 180 ++++++ utils/keys.go | 6 +- 48 files changed, 1130 insertions(+), 422 deletions(-) create mode 100644 message/validation/consensus_state.go create mode 100644 message/validation/rsa.go create mode 100644 protocol/v2/types/signature_benchmark_linux_test.go create mode 100644 protocol/v2/types/signature_benchmark_test.go diff --git a/cli/operator/node.go b/cli/operator/node.go index 9c20e2fda0..a6bca602c3 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -2,7 +2,9 @@ package operator import ( "context" + "crypto/rsa" "crypto/x509" + "encoding/base64" "fmt" "log" "math/big" @@ -62,10 +64,6 @@ type KeyStore struct { PasswordFile string `yaml:"PasswordFile" env:"PASSWORD_FILE" env-description:"Password for operator private key file decryption"` } -type MessageValidation struct { - VerifySignatures bool `yaml:"VerifySignatures" env:"MESSAGE_VALIDATION_VERIFY_SIGNATURES" env-default:"false" env-description:"Experimental feature to verify signatures in pubsub's message validation instead of in consensus protocol."` -} - type config struct { global_config.GlobalConfig `yaml:"global"` DBOptions basedb.Options `yaml:"db"` @@ -82,7 +80,6 @@ type config struct { WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` - MessageValidation MessageValidation `yaml:"MessageValidation"` } var cfg config @@ -166,7 +163,8 @@ var StartNodeCmd = &cobra.Command{ cfg.P2pNetworkConfig.Permissioned = permissioned cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) 
cfg.P2pNetworkConfig.NodeStorage = nodeStorage - cfg.P2pNetworkConfig.OperatorID = format.OperatorID(operatorData.PublicKey) + cfg.P2pNetworkConfig.OperatorPubKeyHash = format.OperatorID(operatorData.PublicKey) + cfg.P2pNetworkConfig.OperatorID = operatorData.ID cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode cfg.P2pNetworkConfig.Network = networkConfig @@ -177,19 +175,16 @@ var StartNodeCmd = &cobra.Command{ messageValidator := validation.NewMessageValidator( networkConfig, - validation.WithShareStorage(nodeStorage.Shares()), + validation.WithNodeStorage(nodeStorage), validation.WithLogger(logger), validation.WithMetrics(metricsReporter), validation.WithDutyStore(dutyStore), validation.WithOwnOperatorID(operatorData.ID), - validation.WithSignatureVerification(cfg.MessageValidation.VerifySignatures), ) cfg.P2pNetworkConfig.Metrics = metricsReporter cfg.P2pNetworkConfig.MessageValidator = messageValidator cfg.SSVOptions.ValidatorOptions.MessageValidator = messageValidator - // if signature check is enabled in message validation then it's disabled in validator controller and vice versa - cfg.SSVOptions.ValidatorOptions.VerifySignatures = !cfg.MessageValidation.VerifySignatures p2pNetwork := setupP2P(logger, db) @@ -452,6 +447,11 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database) (operatorstora cfg.OperatorPrivateKey = rsaencryption.ExtractPrivateKey(privateKey) } + cfg.P2pNetworkConfig.OperatorPrivateKey, err = decodePrivateKey(cfg.OperatorPrivateKey) + if err != nil { + logger.Fatal("could not decode operator private key", zap.Error(err)) + } + operatorPubKey, err := nodeStorage.SetupPrivateKey(cfg.OperatorPrivateKey) if err != nil { logger.Fatal("could not setup operator private key", zap.Error(err)) @@ -475,6 +475,20 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database) (operatorstora return nodeStorage, operatorData } +func decodePrivateKey(key string) (*rsa.PrivateKey, error) { + operatorKeyByte, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return nil, err + } + + sk, err := rsaencryption.ConvertPemToPrivateKey(string(operatorKeyByte)) + if err != nil { + return nil, err + } + + return sk, err +} + func setupSSVNetwork(logger *zap.Logger) (networkconfig.NetworkConfig, error) { networkConfig, err := networkconfig.GetNetworkConfigByName(cfg.SSVOptions.NetworkName) if err != nil { diff --git a/go.mod b/go.mod index cb1cfef274..7be627fb25 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/libp2p/go-libp2p v0.28.2 github.com/libp2p/go-libp2p-kad-dht v0.23.0 github.com/libp2p/go-libp2p-pubsub v0.9.3 + github.com/microsoft/go-crypto-openssl v0.2.8 github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multistream v0.4.1 github.com/patrickmn/go-cache v2.1.0+incompatible diff --git a/go.sum b/go.sum index cd46217e19..527b7eea50 100644 --- a/go.sum +++ b/go.sum @@ -487,6 +487,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/microsoft/go-crypto-openssl v0.2.8 h1:16B6DVeBCimOAG0B92PSySOnVDq6Qr/siI3TyyMHXoI= +github.com/microsoft/go-crypto-openssl v0.2.8/go.mod h1:xOSmQnWz4xvNB2+KQN2g2UUwMG9vqDHBk9nk/NdmyRw= github.com/miekg/dns v1.1.41/go.mod 
h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= diff --git a/identity/store_test.go b/identity/store_test.go index 4fe7da535a..877eeac669 100644 --- a/identity/store_test.go +++ b/identity/store_test.go @@ -68,7 +68,7 @@ func TestSetupPrivateKey(t *testing.T) { require.NoError(t, err) require.NotNil(t, sk) - interfacePriv, err := commons.ConvertToInterfacePrivkey(privKey) + interfacePriv, err := commons.ECDSAPrivToInterface(privKey) require.NoError(t, err) b, err := interfacePriv.Raw() require.NoError(t, err) @@ -87,7 +87,7 @@ func TestSetupPrivateKey(t *testing.T) { return } if test.existKey != "" && test.passedKey == "" { // exist and not passed in env - interfacePriv, err := commons.ConvertToInterfacePrivkey(privateKey) + interfacePriv, err := commons.ECDSAPrivToInterface(privateKey) require.NoError(t, err) b, err := interfacePriv.Raw() require.NoError(t, err) @@ -95,7 +95,7 @@ func TestSetupPrivateKey(t *testing.T) { return } // not exist && passed and exist && passed - interfacePriv, err := commons.ConvertToInterfacePrivkey(privateKey) + interfacePriv, err := commons.ECDSAPrivToInterface(privateKey) require.NoError(t, err) b, err := interfacePriv.Raw() require.NoError(t, err) diff --git a/integration/qbft/tests/scenario_test.go b/integration/qbft/tests/scenario_test.go index e803fd9616..13f14d07f9 100644 --- a/integration/qbft/tests/scenario_test.go +++ b/integration/qbft/tests/scenario_test.go @@ -188,8 +188,9 @@ func createValidator(t *testing.T, pCtx context.Context, id spectypes.OperatorID require.NoError(t, err) options := protocolvalidator.Options{ - Storage: newStores(logger), - Network: node, + Storage: newStores(logger), + Network: node, + BeaconNetwork: networkconfig.TestNetwork.Beacon, SSVShare: &types.SSVShare{ Share: *testingShare(keySet, id), Metadata: types.Metadata{ diff --git a/message/validation/consensus_state.go b/message/validation/consensus_state.go new file mode 100644 index 0000000000..5f8869e6d4 --- /dev/null +++ b/message/validation/consensus_state.go @@ -0,0 +1,34 @@ +package validation + +import ( + "github.com/attestantio/go-eth2-client/spec/phase0" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/cornelk/hashmap" +) + +// ConsensusID uniquely identifies a public key and role pair to keep track of state. +type ConsensusID struct { + PubKey phase0.BLSPubKey + Role spectypes.BeaconRole +} + +// ConsensusState keeps track of the signers for a given public key and role. +type ConsensusState struct { + // TODO: consider evicting old data to avoid excessive memory consumption + Signers *hashmap.Map[spectypes.OperatorID, *SignerState] +} + +// GetSignerState retrieves the state for the given signer. +// Returns nil if the signer is not found. +func (cs *ConsensusState) GetSignerState(signer spectypes.OperatorID) *SignerState { + signerState, _ := cs.Signers.Get(signer) + return signerState +} + +// CreateSignerState initializes and sets a new SignerState for the given signer. 
+func (cs *ConsensusState) CreateSignerState(signer spectypes.OperatorID) *SignerState { + signerState := &SignerState{} + cs.Signers.Set(signer, signerState) + + return signerState +} diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go index 6bdf023fc4..fde979826b 100644 --- a/message/validation/consensus_validation.go +++ b/message/validation/consensus_validation.go @@ -4,7 +4,6 @@ package validation import ( "bytes" - "encoding/hex" "fmt" "time" @@ -23,6 +22,7 @@ func (mv *messageValidator) validateConsensusMessage( signedMsg *specqbft.SignedMessage, messageID spectypes.MessageID, receivedAt time.Time, + signatureVerifier func() error, ) (ConsensusDescriptor, phase0.Slot, error) { var consensusDescriptor ConsensusDescriptor @@ -116,12 +116,9 @@ func (mv *messageValidator) validateConsensusMessage( } } - if mv.verifySignatures { - if err := ssvtypes.VerifyByOperators(signedMsg.Signature, signedMsg, mv.netCfg.Domain, spectypes.QBFTSignatureType, share.Committee); err != nil { - signErr := ErrInvalidSignature - signErr.innerErr = err - signErr.got = fmt.Sprintf("domain %v from %v", hex.EncodeToString(mv.netCfg.Domain[:]), hex.EncodeToString(share.ValidatorPubKey)) - return consensusDescriptor, msgSlot, signErr + if signatureVerifier != nil { + if err := signatureVerifier(); err != nil { + return consensusDescriptor, msgSlot, err } } @@ -178,7 +175,7 @@ func (mv *messageValidator) validateJustifications( } if signedMsg.Message.MsgType == specqbft.ProposalMsgType { - cfg := newQBFTConfig(mv.netCfg.Domain, mv.verifySignatures) + cfg := newQBFTConfig(mv.netCfg.Domain) if err := instance.IsProposalJustification( cfg, diff --git a/message/validation/errors.go b/message/validation/errors.go index 29d631675c..4eeacf4877 100644 --- a/message/validation/errors.go +++ b/message/validation/errors.go @@ -56,6 +56,8 @@ var ( ErrEarlyMessage = Error{text: "early message"} ErrLateMessage = Error{text: "late message"} ErrTooManySameTypeMessagesPerRound = Error{text: "too many messages of same type per round"} + ErrRSADecryption = Error{text: "rsa decryption", reject: true} + ErrOperatorNotFound = Error{text: "operator not found", reject: true} ErrPubSubMessageHasNoData = Error{text: "pub-sub message has no data", reject: true} ErrPubSubDataTooBig = Error{text: "pub-sub message data too big", reject: true} ErrMalformedPubSubMessage = Error{text: "pub-sub message is malformed", reject: true} @@ -74,10 +76,9 @@ var ( ErrSignersNotSorted = Error{text: "signers are not sorted", reject: true} ErrUnexpectedSigner = Error{text: "signer is not expected", reject: true} ErrInvalidHash = Error{text: "root doesn't match full data hash", reject: true} - ErrInvalidSignature = Error{text: "invalid signature", reject: true} - ErrInvalidPartialSignature = Error{text: "invalid partial signature", reject: true} ErrEstimatedRoundTooFar = Error{text: "message round is too far from estimated"} ErrMalformedMessage = Error{text: "message could not be decoded", reject: true} + ErrMalformedSignedMessage = Error{text: "signed message could not be decoded", reject: true} ErrUnknownSSVMessageType = Error{text: "unknown SSV message type", reject: true} ErrUnknownQBFTMessageType = Error{text: "unknown QBFT message type", reject: true} ErrUnknownPartialMessageType = Error{text: "unknown partial signature message type", reject: true} diff --git a/message/validation/partial_validation.go b/message/validation/partial_validation.go index 781267f22d..0cfcbdac91 100644 --- 
a/message/validation/partial_validation.go +++ b/message/validation/partial_validation.go @@ -3,14 +3,9 @@ package validation // partial_validation.go contains methods for validating partial signature messages import ( - "encoding/hex" - "fmt" - "time" - "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/herumi/bls-eth-go-binary/bls" "golang.org/x/exp/slices" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" @@ -20,6 +15,7 @@ func (mv *messageValidator) validatePartialSignatureMessage( share *ssvtypes.SSVShare, signedMsg *spectypes.SignedPartialSignatureMessage, msgID spectypes.MessageID, + signatureVerifier func() error, ) (phase0.Slot, error) { if mv.inCommittee(share) { mv.metrics.InCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) @@ -56,8 +52,8 @@ func (mv *messageValidator) validatePartialSignatureMessage( return msgSlot, err } - if mv.verifySignatures { - if err := mv.validPartialSignatures(share, signedMsg); err != nil { + if signatureVerifier != nil { + if err := signatureVerifier(); err != nil { return msgSlot, err } } @@ -114,63 +110,6 @@ func (mv *messageValidator) partialSignatureTypeMatchesRole(msgType spectypes.Pa } } -func (mv *messageValidator) validPartialSignatures(share *ssvtypes.SSVShare, signedMsg *spectypes.SignedPartialSignatureMessage) error { - if err := ssvtypes.VerifyByOperators(signedMsg.Signature, signedMsg, mv.netCfg.Domain, spectypes.PartialSignatureType, share.Committee); err != nil { - signErr := ErrInvalidSignature - signErr.innerErr = err - signErr.got = fmt.Sprintf("domain %v from %v", hex.EncodeToString(mv.netCfg.Domain[:]), hex.EncodeToString(share.ValidatorPubKey)) - return signErr - } - - for _, message := range signedMsg.Message.Messages { - if err := mv.verifyPartialSignature(message, share); err != nil { - return err - } - } - - return nil -} - -func (mv *messageValidator) verifyPartialSignature(msg *spectypes.PartialSignatureMessage, share *ssvtypes.SSVShare) error { - signer := msg.Signer - signature := msg.PartialSignature - root := msg.SigningRoot - - for _, n := range share.Committee { - if n.GetID() != signer { - continue - } - - pk, err := ssvtypes.DeserializeBLSPublicKey(n.GetPublicKey()) - if err != nil { - return fmt.Errorf("deserialize pk: %w", err) - } - sig := &bls.Sign{} - if err := sig.Deserialize(signature); err != nil { - return fmt.Errorf("deserialize signature: %w", err) - } - - if !mv.aggregateVerify(sig, pk, root) { - return ErrInvalidPartialSignature - } - - return nil - } - - return ErrSignerNotInCommittee -} - -func (mv *messageValidator) aggregateVerify(sig *bls.Sign, pk bls.PublicKey, root [32]byte) bool { - start := time.Now() - - valid := sig.FastAggregateVerify([]bls.PublicKey{pk}, root[:]) - - sinceStart := time.Since(start) - mv.metrics.SignatureValidationDuration(sinceStart) - - return valid -} - func (mv *messageValidator) validatePartialMessages(share *ssvtypes.SSVShare, m *spectypes.SignedPartialSignatureMessage) error { if err := mv.commonSignerValidation(m.Signer, share); err != nil { return err diff --git a/message/validation/qbft_config.go b/message/validation/qbft_config.go index fe5ed6dc04..9750b3d8d3 100644 --- a/message/validation/qbft_config.go +++ b/message/validation/qbft_config.go @@ -8,15 +8,14 @@ import ( qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) +// qbftConfig is used in message validation and has no signature verification. 
type qbftConfig struct { - domain spectypes.DomainType - verifySignature bool + domain spectypes.DomainType } -func newQBFTConfig(domain spectypes.DomainType, verifySignature bool) qbftConfig { +func newQBFTConfig(domain spectypes.DomainType) qbftConfig { return qbftConfig{ - domain: domain, - verifySignature: verifySignature, + domain: domain, } } @@ -49,5 +48,5 @@ func (q qbftConfig) GetTimer() roundtimer.Timer { } func (q qbftConfig) VerifySignatures() bool { - return q.verifySignature + return false } diff --git a/message/validation/rsa.go b/message/validation/rsa.go new file mode 100644 index 0000000000..94071ae0f6 --- /dev/null +++ b/message/validation/rsa.go @@ -0,0 +1,57 @@ +package validation + +import ( + "crypto" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "fmt" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/utils/rsaencryption" +) + +func (mv *messageValidator) verifyRSASignature(messageData []byte, operatorID spectypes.OperatorID, signature []byte) error { + rsaPubKey, ok := mv.operatorIDToPubkeyCache.Get(operatorID) + if !ok { + operator, found, err := mv.nodeStorage.GetOperatorData(nil, operatorID) + if err != nil { + e := ErrOperatorNotFound + e.got = operatorID + e.innerErr = err + return e + } + if !found { + e := ErrOperatorNotFound + e.got = operatorID + return e + } + + operatorPubKey, err := base64.StdEncoding.DecodeString(string(operator.PublicKey)) + if err != nil { + e := ErrRSADecryption + e.innerErr = fmt.Errorf("decode public key: %w", err) + return e + } + + rsaPubKey, err = rsaencryption.ConvertPemToPublicKey(operatorPubKey) + if err != nil { + e := ErrRSADecryption + e.innerErr = fmt.Errorf("convert PEM: %w", err) + return e + } + + mv.operatorIDToPubkeyCache.Set(operatorID, rsaPubKey) + } + + messageHash := sha256.Sum256(messageData) + + if err := rsa.VerifyPKCS1v15(rsaPubKey, crypto.SHA256, messageHash[:], signature); err != nil { + e := ErrRSADecryption + e.innerErr = fmt.Errorf("verify opid: %v signature: %w", operatorID, err) + return e + } + + return nil +} diff --git a/message/validation/validation.go b/message/validation/validation.go index 98e100fa3c..4fc6b1cd76 100644 --- a/message/validation/validation.go +++ b/message/validation/validation.go @@ -6,6 +6,7 @@ package validation import ( "bytes" "context" + "crypto/rsa" "encoding/hex" "fmt" "strings" @@ -27,10 +28,10 @@ import ( "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties/dutystore" + operatorstorage "github.com/bloxapp/ssv/operator/storage" ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" - registrystorage "github.com/bloxapp/ssv/registry/storage" ) const ( @@ -50,33 +51,6 @@ const ( maxDutiesPerEpoch = 2 ) -// ConsensusID uniquely identifies a public key and role pair to keep track of state. -type ConsensusID struct { - PubKey phase0.BLSPubKey - Role spectypes.BeaconRole -} - -// ConsensusState keeps track of the signers for a given public key and role. -type ConsensusState struct { - // TODO: consider evicting old data to avoid excessive memory consumption - Signers *hashmap.Map[spectypes.OperatorID, *SignerState] -} - -// GetSignerState retrieves the state for the given signer. -// Returns nil if the signer is not found. 
-func (cs *ConsensusState) GetSignerState(signer spectypes.OperatorID) *SignerState { - signerState, _ := cs.Signers.Get(signer) - return signerState -} - -// CreateSignerState initializes and sets a new SignerState for the given signer. -func (cs *ConsensusState) CreateSignerState(signer spectypes.OperatorID) *SignerState { - signerState := &SignerState{} - cs.Signers.Set(signer, signerState) - - return signerState -} - // PubsubMessageValidator defines methods for validating pubsub messages. type PubsubMessageValidator interface { ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult @@ -95,22 +69,23 @@ type MessageValidator interface { } type messageValidator struct { - logger *zap.Logger - metrics metrics - netCfg networkconfig.NetworkConfig - index sync.Map - shareStorage registrystorage.Shares - dutyStore *dutystore.Store - ownOperatorID spectypes.OperatorID - verifySignatures bool + logger *zap.Logger + metrics metrics + netCfg networkconfig.NetworkConfig + index sync.Map + nodeStorage operatorstorage.Storage + dutyStore *dutystore.Store + ownOperatorID spectypes.OperatorID + operatorIDToPubkeyCache *hashmap.Map[spectypes.OperatorID, *rsa.PublicKey] } // NewMessageValidator returns a new MessageValidator with the given network configuration and options. func NewMessageValidator(netCfg networkconfig.NetworkConfig, opts ...Option) MessageValidator { mv := &messageValidator{ - logger: zap.NewNop(), - metrics: &nopMetrics{}, - netCfg: netCfg, + logger: zap.NewNop(), + metrics: &nopMetrics{}, + netCfg: netCfg, + operatorIDToPubkeyCache: hashmap.New[spectypes.OperatorID, *rsa.PublicKey](), } for _, opt := range opts { @@ -151,17 +126,10 @@ func WithOwnOperatorID(id spectypes.OperatorID) Option { } } -// WithShareStorage sets the share storage for the messageValidator. -func WithShareStorage(shareStorage registrystorage.Shares) Option { - return func(mv *messageValidator) { - mv.shareStorage = shareStorage - } -} - -// WithSignatureVerification sets whether to verify signatures in the messageValidator. -func WithSignatureVerification(check bool) Option { +// WithNodeStorage sets the node storage for the messageValidator. +func WithNodeStorage(nodeStorage operatorstorage.Storage) Option { return func(mv *messageValidator) { - mv.verifySignatures = check + mv.nodeStorage = nodeStorage } } @@ -243,7 +211,7 @@ func (mv *messageValidator) ValidatorForTopic(_ string) func(ctx context.Context // ValidatePubsubMessage validates the given pubsub message. // Depending on the outcome, it will return one of the pubsub validation results (Accept, Ignore, or Reject). -func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, _ peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { +func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, peerID peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { start := time.Now() var validationDurationLabels []string // TODO: implement @@ -295,7 +263,7 @@ func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, _ peer.ID, // ValidateSSVMessage validates the given SSV message. // If successful, it returns the decoded message and its descriptor. Otherwise, it returns an error. 
func (mv *messageValidator) ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error) { - return mv.validateSSVMessage(ssvMessage, time.Now()) + return mv.validateSSVMessage(ssvMessage, time.Now(), nil) } func (mv *messageValidator) validateP2PMessage(pMsg *pubsub.Message, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { @@ -305,6 +273,24 @@ func (mv *messageValidator) validateP2PMessage(pMsg *pubsub.Message, receivedAt defer mv.metrics.ActiveMsgValidationDone(topic) messageData := pMsg.GetData() + + var signatureVerifier func() error + + currentEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())) + if currentEpoch > mv.netCfg.RSAForkEpoch { + decMessageData, operatorID, signature, err := commons.DecodeSignedSSVMessage(messageData) + messageData = decMessageData + if err != nil { + e := ErrMalformedSignedMessage + e.innerErr = err + return nil, Descriptor{}, e + } + + signatureVerifier = func() error { + return mv.verifyRSASignature(messageData, operatorID, signature) + } + } + if len(messageData) == 0 { return nil, Descriptor{}, ErrPubSubMessageHasNoData } @@ -349,10 +335,10 @@ func (mv *messageValidator) validateP2PMessage(pMsg *pubsub.Message, receivedAt mv.metrics.SSVMessageType(msg.MsgType) - return mv.validateSSVMessage(msg, receivedAt) + return mv.validateSSVMessage(msg, receivedAt, signatureVerifier) } -func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { +func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, receivedAt time.Time, signatureVerifier func() error) (*queue.DecodedSSVMessage, Descriptor, error) { var descriptor Descriptor if len(ssvMessage.Data) == 0 { @@ -390,8 +376,8 @@ func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, } var share *ssvtypes.SSVShare - if mv.shareStorage != nil { - share = mv.shareStorage.Get(nil, publicKey.Serialize()) + if mv.nodeStorage != nil { + share = mv.nodeStorage.Shares().Get(nil, publicKey.Serialize()) if share == nil { e := ErrUnknownValidator e.got = publicKey.SerializeToHexStr() @@ -428,7 +414,7 @@ func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, descriptor.SSVMessageType = ssvMessage.MsgType - if mv.shareStorage != nil { + if mv.nodeStorage != nil { switch ssvMessage.MsgType { case spectypes.SSVConsensusMsgType: if len(msg.Data) > maxConsensusMsgSize { @@ -438,7 +424,8 @@ func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, return nil, descriptor, e } - consensusDescriptor, slot, err := mv.validateConsensusMessage(share, msg.Body.(*specqbft.SignedMessage), msg.GetID(), receivedAt) + signedMessage := msg.Body.(*specqbft.SignedMessage) + consensusDescriptor, slot, err := mv.validateConsensusMessage(share, signedMessage, msg.GetID(), receivedAt, signatureVerifier) descriptor.Consensus = &consensusDescriptor descriptor.Slot = slot if err != nil { @@ -453,7 +440,8 @@ func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, return nil, descriptor, e } - slot, err := mv.validatePartialSignatureMessage(share, msg.Body.(*spectypes.SignedPartialSignatureMessage), msg.GetID()) + partialSignatureMessage := msg.Body.(*spectypes.SignedPartialSignatureMessage) + slot, err := mv.validatePartialSignatureMessage(share, partialSignatureMessage, msg.GetID(), signatureVerifier) descriptor.Slot = slot if err != 
nil { return nil, descriptor, err diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go index b307e05049..f7afaaa053 100644 --- a/message/validation/validation_test.go +++ b/message/validation/validation_test.go @@ -2,6 +2,10 @@ package validation import ( "bytes" + "crypto" + crand "crypto/rand" + "crypto/rsa" + "crypto/sha256" "encoding/hex" "math" "testing" @@ -12,6 +16,7 @@ import ( specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/ethereum/go-ethereum/common" "github.com/herumi/bls-eth-go-binary/bls" pubsub "github.com/libp2p/go-libp2p-pubsub" pspb "github.com/libp2p/go-libp2p-pubsub/pb" @@ -26,8 +31,10 @@ import ( beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + registrystorage "github.com/bloxapp/ssv/registry/storage" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils/rsaencryption" ) func Test_ValidateSSVMessage(t *testing.T) { @@ -59,7 +66,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Message validation happy flow, messages are not ignored or rejected and there are no errors t.Run("happy flow", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -75,13 +82,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.NoError(t, err) }) // Make sure messages are incremented and throw an ignore message if more than 1 for a commit t.Run("message counts", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -104,10 +111,10 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt, nil) require.NoError(t, err) - _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt, nil) require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) state1 := state.GetSignerState(1) @@ -125,7 +132,7 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) ssvMsg.Data = encodedMsg - _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt, nil) require.NoError(t, err) require.NotNil(t, state1) @@ -133,7 +140,7 @@ func Test_ValidateSSVMessage(t *testing.T) { require.EqualValues(t, 2, state1.Round) require.EqualValues(t, MessageCounts{Prepare: 1}, state1.MessageCounts) - _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + _, _, err = 
validator.validateSSVMessage(ssvMsg, receivedAt, nil) require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) signedMsg = spectestingutils.TestingCommitMessageWithHeight(ks.Shares[1], 1, height+1) @@ -141,14 +148,14 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) ssvMsg.Data = encodedMsg - _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec()), nil) require.NoError(t, err) require.NotNil(t, state1) require.EqualValues(t, height+1, state1.Slot) require.EqualValues(t, 1, state1.Round) require.EqualValues(t, MessageCounts{Commit: 1}, state1.MessageCounts) - _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec()), nil) require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) signedMsg = spectestingutils.TestingCommitMultiSignerMessageWithHeight([]*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, height+1) @@ -156,7 +163,7 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) ssvMsg.Data = encodedMsg - _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec()), nil) require.NoError(t, err) require.NotNil(t, state1) require.EqualValues(t, height+1, state1.Slot) @@ -166,7 +173,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send a pubsub message with no data should cause an error t.Run("pubsub message has no data", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -180,7 +187,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send a pubsub message where there is too much data should cause an error t.Run("pubsub data too big", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -203,7 +210,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send a malformed pubsub message (empty message) should return an error t.Run("empty pubsub message", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -224,7 +231,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send a message with incorrect data (unable to decode incorrect message type) t.Run("bad data format", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -235,14 +242,14 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, 
err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrMalformedMessage.Error()) }) // Send a message with no data should return an error t.Run("no data", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) message := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, @@ -250,7 +257,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: []byte{}, } - _, _, err := validator.validateSSVMessage(message, time.Now()) + _, _, err := validator.validateSSVMessage(message, time.Now(), nil) require.ErrorIs(t, err, ErrEmptyData) message = &spectypes.SSVMessage{ @@ -259,13 +266,13 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: nil, } - _, _, err = validator.validateSSVMessage(message, time.Now()) + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) require.ErrorIs(t, err, ErrEmptyData) }) // Send a message where there is too much data should cause an error t.Run("data too big", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) const tooBigMsgSize = maxMessageSize * 2 @@ -275,7 +282,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: bytes.Repeat([]byte{0x1}, tooBigMsgSize), } - _, _, err := validator.validateSSVMessage(message, time.Now()) + _, _, err := validator.validateSSVMessage(message, time.Now(), nil) expectedErr := ErrSSVDataTooBig expectedErr.got = tooBigMsgSize expectedErr.want = maxMessageSize @@ -284,7 +291,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send exact allowed data size amount but with invalid data (fails to decode) t.Run("data size borderline / malformed message", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) message := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, @@ -292,13 +299,13 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: bytes.Repeat([]byte{0x1}, maxMessageSize), } - _, _, err := validator.validateSSVMessage(message, time.Now()) + _, _, err := validator.validateSSVMessage(message, time.Now(), nil) require.ErrorContains(t, err, ErrMalformedMessage.Error()) }) // Send an invalid SSV message type returns an error t.Run("invalid SSV message type", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) message := &spectypes.SSVMessage{ MsgType: math.MaxUint64, @@ -306,13 +313,13 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: []byte{0x1}, } - _, _, err = validator.validateSSVMessage(message, time.Now()) + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) require.ErrorContains(t, err, ErrUnknownSSVMessageType.Error()) }) // Empty validator public key returns an error t.Run("empty validator public key", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := 
NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) encodedValidSignedMessage, err := validSignedMessage.Encode() @@ -324,13 +331,13 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedValidSignedMessage, } - _, _, err = validator.validateSSVMessage(message, time.Now()) + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) require.ErrorContains(t, err, ErrDeserializePublicKey.Error()) }) // Generate random validator and validate it is unknown to the network t.Run("unknown validator", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) sk, err := eth2types.GenerateBLSPrivateKey() require.NoError(t, err) @@ -345,7 +352,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedValidSignedMessage, } - _, _, err = validator.validateSSVMessage(message, time.Now()) + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) expectedErr := ErrUnknownValidator expectedErr.got = hex.EncodeToString(sk.PublicKey().Marshal()) require.ErrorIs(t, err, expectedErr) @@ -353,7 +360,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Make sure messages are dropped if on the incorrect network t.Run("wrong domain", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -370,7 +377,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrWrongDomain expectedErr.got = hex.EncodeToString(wrongDomain[:]) expectedErr.want = hex.EncodeToString(netCfg.Domain[:]) @@ -379,7 +386,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send message with a value that refers to a non-existent role t.Run("invalid role", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -395,13 +402,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrInvalidRole) }) // Perform validator registration with a consensus type message will give an error t.Run("consensus validator registration", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -417,13 +424,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrConsensusValidatorRegistration) }) // Ignore messages related to a validator that is liquidated t.Run("liquidated validator", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) liquidatedSK, err := eth2types.GenerateBLSPrivateKey() require.NoError(t, err) @@ -451,7 +458,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedValidSignedMessage, } - _, _, err = validator.validateSSVMessage(message, time.Now()) + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) expectedErr := ErrValidatorLiquidated require.ErrorIs(t, err, expectedErr) @@ -460,7 +467,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Ignore messages related to a validator that is not active t.Run("inactive validator", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) inactiveSK, err := eth2types.GenerateBLSPrivateKey() require.NoError(t, err) @@ -491,7 +498,7 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(1) receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrValidatorNotAttesting expectedErr.got = eth2apiv1.ValidatorStateUnknown.String() require.ErrorIs(t, err, expectedErr) @@ -501,7 +508,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Unable to process a message with a validator that is not on the network t.Run("no share metadata", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) noMetadataSK, err := eth2types.GenerateBLSPrivateKey() require.NoError(t, err) @@ -530,7 +537,7 @@ func Test_ValidateSSVMessage(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(1) receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrNoShareMetadata) require.NoError(t, ns.Shares().Delete(nil, noMetadataShare.ValidatorPubKey)) @@ -538,7 +545,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Receive error if more than 2 attestation duties in an epoch t.Run("too many duties", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -553,7 +560,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedValidSignedMessage, } - _, _, err = validator.validateSSVMessage(message, 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) require.NoError(t, err) validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+4) @@ -561,7 +568,7 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) message.Data = encodedValidSignedMessage - _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+4).Add(validator.waitAfterSlotStart(roleAttester))) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+4).Add(validator.waitAfterSlotStart(roleAttester)), nil) require.NoError(t, err) validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+8) @@ -569,7 +576,7 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) message.Data = encodedValidSignedMessage - _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+8).Add(validator.waitAfterSlotStart(roleAttester))) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+8).Add(validator.waitAfterSlotStart(roleAttester)), nil) require.ErrorContains(t, err, ErrTooManyDutiesPerEpoch.Error()) }) @@ -581,7 +588,7 @@ func Test_ValidateSSVMessage(t *testing.T) { dutyStore := dutystore.New() dutyStore.Proposer.Add(epoch, slot, validatorIndex+1, &eth2apiv1.ProposerDuty{}, true) - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithDutyStore(dutyStore), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns), WithDutyStore(dutyStore)).(*messageValidator) validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) encodedValidSignedMessage, err := validSignedMessage.Encode() @@ -593,13 +600,13 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedValidSignedMessage, } - _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer))) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), nil) require.ErrorContains(t, err, ErrNoDuty.Error()) dutyStore = dutystore.New() dutyStore.Proposer.Add(epoch, slot, validatorIndex, &eth2apiv1.ProposerDuty{}, true) - validator = NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithDutyStore(dutyStore), WithSignatureVerification(true)).(*messageValidator) - _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer))) + validator = NewMessageValidator(netCfg, WithNodeStorage(ns), WithDutyStore(dutyStore)).(*messageValidator) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), nil) require.NoError(t, err) }) @@ -618,7 +625,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Get error when receiving message from operator who is not affiliated with the validator t.Run("signer ID not in committee", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator)
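// The message built below is signed by an operator that is not part of this share's committee, so validation is expected to fail with ErrSignerNotInCommittee.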
slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -634,13 +641,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrSignerNotInCommittee) }) // Get error when receiving message from operator who is non-existent (operator id 0) t.Run("partial zero signer ID", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -656,13 +663,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSigner) }) // Get error when receiving partial signature message from operator who is the incorrect signer t.Run("partial inconsistent signer ID", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -679,7 +686,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrUnexpectedSigner expectedErr.got = spectypes.OperatorID(2) expectedErr.want = spectypes.OperatorID(1) @@ -688,7 +695,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Receive error when receiving a duplicated partial signature message t.Run("partial duplicated message", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -705,13 +712,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrDuplicatedPartialSignatureMessage) }) // Receive error when "partialSignatureMessages" does not contain any "partialSignatureMessage" t.Run("no partial signature messages", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -728,13 +735,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrNoPartialMessages) }) // Receive error when 
the partial signature message is not enough bytes t.Run("partial wrong signature size", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -751,33 +758,10 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrMalformedMessage.Error()) }) - // Get error when receiving a partial signature message with an invalid signature - t.Run("partial wrong signature", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) - - slot := netCfg.Beacon.FirstSlotAtEpoch(1) - - msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) - msg.Signature = bytes.Repeat([]byte{1}, 96) - - encoded, err := msg.Encode() - require.NoError(t, err) - - message := &spectypes.SSVMessage{ - MsgType: spectypes.SSVPartialSignatureMsgType, - MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), - Data: encoded, - } - - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) - require.ErrorContains(t, err, ErrInvalidSignature.Error()) - }) - // Run partial message type validation tests t.Run("partial message type validation", func(t *testing.T) { slot := netCfg.Beacon.FirstSlotAtEpoch(162304) @@ -795,7 +779,7 @@ func Test_ValidateSSVMessage(t *testing.T) { for role, msgTypes := range tests { for _, msgType := range msgTypes { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) innerSig, r, err := spectestingutils.NewTestingKeyManager().SignBeaconObject(spectypes.SSZUint64(spectestingutils.TestingDutyEpoch), phase0.Domain{}, ks.Shares[1].GetPublicKey().Serialize(), phase0.DomainType{}) @@ -831,7 +815,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.NoError(t, err) } } @@ -839,7 +823,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Get error when receiving a message with an incorrect message type t.Run("invalid message type", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) msg := &spectypes.SignedPartialSignatureMessage{ @@ -860,7 +844,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, 
ErrUnknownPartialMessageType.Error()) }) @@ -877,7 +861,7 @@ func Test_ValidateSSVMessage(t *testing.T) { for role, msgTypes := range tests { for _, msgType := range msgTypes { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) msg := &spectypes.SignedPartialSignatureMessage{ @@ -898,7 +882,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrPartialSignatureTypeRoleMismatch.Error()) } } @@ -907,7 +891,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Get error when receiving QBFT message with an invalid type t.Run("invalid QBFT message type", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -931,7 +915,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrUnknownQBFTMessageType require.ErrorIs(t, err, expectedErr) }) @@ -950,7 +934,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Initialize signature tests t.Run("zero signature", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -971,7 +955,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSignature) }) @@ -991,14 +975,14 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSignature) }) }) // Get error when receiving a message with an empty list of signers t.Run("no signers", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -1016,13 +1000,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrNoSigners) }) // 
Initialize no signer tests t.Run("zero signer", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1059,7 +1043,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSigner) }) @@ -1078,7 +1062,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrZeroSigner) }) @@ -1087,7 +1071,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Get error when receiving a message with duplicated signers t.Run("non unique signer", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1106,13 +1090,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrDuplicatedSigner) }) // Get error when receiving a message with non-sorted signers t.Run("signers not sorted", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1131,13 +1115,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrSignersNotSorted) }) // Get error when receiving message from non quorum size amount of signers t.Run("wrong signers length", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1156,7 +1140,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrWrongSignersLength expectedErr.got = 2 @@ -1166,7 +1150,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Get error when receiving a non decided message with multiple signers t.Run("non decided with multiple signers", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), 
WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1183,40 +1167,16 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrNonDecidedWithMultipleSigners expectedErr.got = 3 require.ErrorIs(t, err, expectedErr) }) - // Get error when receiving a proposal message with an invalid signature (random bytes) - t.Run("wrong signed signature", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) - - slot := netCfg.Beacon.FirstSlotAtEpoch(1) - - validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) - validSignedMessage.Signature = bytes.Repeat([]byte{1}, 96) - - encoded, err := validSignedMessage.Encode() - require.NoError(t, err) - - message := &spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), - Data: encoded, - } - - receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) - - require.ErrorContains(t, err, ErrInvalidSignature.Error()) - }) - // Send late message for all roles and receive late message error t.Run("late message", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -1244,7 +1204,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedValidSignedMessage, } - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrLateMessage.Error()) }) } @@ -1252,7 +1212,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send early message for all roles before the duty start and receive early message error t.Run("early message", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -1268,13 +1228,13 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot - 1) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorIs(t, err, ErrEarlyMessage) }) // Send message from non-leader acting as a leader should receive an error t.Run("not a leader", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -1290,7 +1250,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrSignerNotLeader expectedErr.got = spectypes.OperatorID(2) expectedErr.want = spectypes.OperatorID(1) @@ -1299,7 +1259,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send wrong size of data (8 bytes) for a prepare justification message should receive an error t.Run("malformed prepare justification", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -1317,14 +1277,14 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrMalformedPrepareJustifications.Error()) }) // Send prepare justification message without a proposal message should receive an error t.Run("non-proposal with prepare justification", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1346,7 +1306,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrUnexpectedPrepareJustifications expectedErr.got = specqbft.PrepareMsgType @@ -1355,7 +1315,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send round change justification message without a proposal message should receive an error t.Run("non-proposal with round change justification", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1378,7 +1338,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrUnexpectedRoundChangeJustifications expectedErr.got = specqbft.PrepareMsgType @@ -1387,7 +1347,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send round change justification message with a malformed message (1 byte) should receive an error t.Run("malformed round change justification", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -1405,14 +1365,14 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.ErrorContains(t, err, ErrMalformedRoundChangeJustifications.Error()) }) // Send message root hash that doesnt match the expected root hash should receive an error t.Run("wrong root hash", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) height := specqbft.Height(slot) @@ -1430,7 +1390,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrInvalidHash require.ErrorIs(t, err, expectedErr) @@ -1438,7 +1398,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Receive proposal from same operator twice with different messages (same round) should receive an error t.Run("double proposal with different data", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1453,7 +1413,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message1, receivedAt) + _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) require.NoError(t, err) signed2 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) @@ -1470,14 +1430,14 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedSigned2, } - _, _, err = validator.validateSSVMessage(message2, receivedAt) + _, _, err = validator.validateSSVMessage(message2, receivedAt, nil) expectedErr := ErrDuplicatedProposalWithDifferentData require.ErrorIs(t, err, expectedErr) }) // Receive prepare from same operator twice with different messages (same round) should receive an error t.Run("double prepare", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1492,7 +1452,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message1, receivedAt) + _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) require.NoError(t, err) signed2 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) @@ -1507,7 +1467,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedSigned2, } - _, _, err = validator.validateSSVMessage(message2, receivedAt) + _, _, err = validator.validateSSVMessage(message2, receivedAt, nil) expectedErr := ErrTooManySameTypeMessagesPerRound expectedErr.got = "prepare, having pre-consensus: 0, proposal: 0, prepare: 1, commit: 0, decided: 0, round change: 0, post-consensus: 0" require.ErrorIs(t, err, 
expectedErr) @@ -1515,7 +1475,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Receive commit from same operator twice with different messages (same round) should receive an error t.Run("double commit", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1530,7 +1490,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message1, receivedAt) + _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) require.NoError(t, err) signed2 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) @@ -1543,7 +1503,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedSigned2, } - _, _, err = validator.validateSSVMessage(message2, receivedAt) + _, _, err = validator.validateSSVMessage(message2, receivedAt, nil) expectedErr := ErrTooManySameTypeMessagesPerRound expectedErr.got = "commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 1, decided: 0, round change: 0, post-consensus: 0" require.ErrorIs(t, err, expectedErr) @@ -1551,7 +1511,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Receive round change from same operator twice with different messages (same round) should receive an error t.Run("double round change", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1566,7 +1526,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(message1, receivedAt) + _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) require.NoError(t, err) signed2 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) @@ -1579,7 +1539,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedSigned2, } - _, _, err = validator.validateSSVMessage(message2, receivedAt) + _, _, err = validator.validateSSVMessage(message2, receivedAt, nil) expectedErr := ErrTooManySameTypeMessagesPerRound expectedErr.got = "round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 0, round change: 1, post-consensus: 0" require.ErrorIs(t, err, expectedErr) @@ -1587,7 +1547,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Receive too many decided messages should receive an error t.Run("too many decided", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1607,11 +1567,11 @@ func Test_ValidateSSVMessage(t *testing.T) { receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) for i := 0; i < maxDecidedCount(len(share.Committee)); i++ { - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) require.NoError(t, err) } - _, _, err = validator.validateSSVMessage(message, receivedAt) + _, _, err = 
validator.validateSSVMessage(message, receivedAt, nil) expectedErr := ErrTooManySameTypeMessagesPerRound expectedErr.got = "decided, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 8, round change: 0, post-consensus: 0" require.ErrorIs(t, err, expectedErr) @@ -1619,7 +1579,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Receive message from a round that is too high for that epoch should receive an error t.Run("round too high", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) tests := map[spectypes.BeaconRole]specqbft.Round{ spectypes.BNRoleAttester: 13, @@ -1645,7 +1605,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(0).Add(validator.waitAfterSlotStart(role)) - _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) require.ErrorContains(t, err, ErrRoundTooHigh.Error()) }) } @@ -1653,7 +1613,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Receive message from a round that is incorrect for current epoch should receive an error t.Run("round already advanced", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1669,7 +1629,7 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) require.NoError(t, err) signedMessage = spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 1) @@ -1677,7 +1637,7 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) ssvMessage.Data = encodedMessage - _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) require.ErrorContains(t, err, ErrRoundAlreadyAdvanced.Error()) }) @@ -1689,7 +1649,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Send a consensus message with a slot before the current one should cause an error t.Run("consensus message", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) signedMessage := spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height+1) encodedMessage, err := signedMessage.Encode() @@ -1701,7 +1661,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedMessage, } - _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester))) + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester)), nil) require.NoError(t, err) signedMessage = spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height) @@ -1709,13 +1669,13 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) 
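// The round-1 prepare encoded below arrives after this signer has already been seen at a later round, so validation is expected to fail with ErrRoundAlreadyAdvanced.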
ssvMessage.Data = encodedMessage - _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) }) // Send a partial signature message with a slot before the current one should cause an error t.Run("partial signature message", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) message := spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height+1) message.Message.Slot = phase0.Slot(height) + 1 @@ -1732,7 +1692,7 @@ func Test_ValidateSSVMessage(t *testing.T) { Data: encodedMessage, } - _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester))) + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester)), nil) require.NoError(t, err) message = spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height) @@ -1745,14 +1705,14 @@ func Test_ValidateSSVMessage(t *testing.T) { require.NoError(t, err) ssvMessage.Data = encodedMessage - _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) }) }) // Receive an event message from an operator that is not myself should receive an error t.Run("event message", func(t *testing.T) { - validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) slot := netCfg.Beacon.FirstSlotAtEpoch(1) @@ -1768,7 +1728,303 @@ func Test_ValidateSSVMessage(t *testing.T) { } receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) - _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) require.ErrorIs(t, err, ErrEventMessage) }) + + // Get error when receiving an SSV message with an invalid signature. 
+ t.Run("signature verification", func(t *testing.T) { + var afterFork = netCfg.RSAForkEpoch + 1000 + + t.Run("unsigned message before fork", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.NoError(t, err) + }) + + t.Run("unsigned message after fork", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[4], 4, specqbft.Height(afterFork)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(afterFork) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) + }) + + t.Run("signed message before fork", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, specqbft.Height(slot)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + hash := sha256.Sum256(encodedMsg) + privKey, err := rsa.GenerateKey(crand.Reader, 2048) + require.NoError(t, err) + + const operatorID = spectypes.OperatorID(1) + + pubKey, err := rsaencryption.ExtractPublicKey(privKey) + require.NoError(t, err) + + od := ®istrystorage.OperatorData{ + ID: operatorID, + PublicKey: []byte(pubKey), + OwnerAddress: common.Address{}, + } + + found, err := ns.SaveOperatorData(nil, od) + require.NoError(t, err) + require.False(t, found) + + signature, err := rsa.SignPKCS1v15(crand.Reader, privKey, crypto.SHA256, hash[:]) + require.NoError(t, err) + + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, operatorID, signature) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := 
&pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) + + require.NoError(t, ns.DeleteOperatorData(nil, operatorID)) + }) + + t.Run("signed message after fork", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(afterFork) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, specqbft.Height(slot)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + hash := sha256.Sum256(encodedMsg) + privKey, err := rsa.GenerateKey(crand.Reader, 2048) + require.NoError(t, err) + + const operatorID = spectypes.OperatorID(1) + + pubKey, err := rsaencryption.ExtractPublicKey(privKey) + require.NoError(t, err) + + od := ®istrystorage.OperatorData{ + ID: operatorID, + PublicKey: []byte(pubKey), + OwnerAddress: common.Address{}, + } + + found, err := ns.SaveOperatorData(nil, od) + require.NoError(t, err) + require.False(t, found) + + signature, err := rsa.SignPKCS1v15(crand.Reader, privKey, crypto.SHA256, hash[:]) + require.NoError(t, err) + + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, operatorID, signature) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.NoError(t, err) + + require.NoError(t, ns.DeleteOperatorData(nil, operatorID)) + }) + + t.Run("unexpected operator ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(afterFork) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, specqbft.Height(slot)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + hash := sha256.Sum256(encodedMsg) + privKey, err := rsa.GenerateKey(crand.Reader, 2048) + require.NoError(t, err) + + const operatorID = spectypes.OperatorID(1) + + pubKey, err := rsaencryption.ExtractPublicKey(privKey) + require.NoError(t, err) + + od := ®istrystorage.OperatorData{ + ID: operatorID, + PublicKey: []byte(pubKey), + OwnerAddress: common.Address{}, + } + + found, err := ns.SaveOperatorData(nil, od) + require.NoError(t, err) + require.False(t, found) + + signature, err := rsa.SignPKCS1v15(crand.Reader, privKey, crypto.SHA256, hash[:]) + require.NoError(t, err) + + const unexpectedOperatorID = 2 + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, unexpectedOperatorID, signature) + + topicID := 
commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.ErrorContains(t, err, ErrOperatorNotFound.Error()) + + require.NoError(t, ns.DeleteOperatorData(nil, operatorID)) + }) + + t.Run("malformed signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(afterFork) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, specqbft.Height(slot)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + privKey, err := rsa.GenerateKey(crand.Reader, 2048) + require.NoError(t, err) + + const operatorID = spectypes.OperatorID(1) + + pubKey, err := rsaencryption.ExtractPublicKey(privKey) + require.NoError(t, err) + + od := &registrystorage.OperatorData{ + ID: operatorID, + PublicKey: []byte(pubKey), + OwnerAddress: common.Address{}, + } + + found, err := ns.SaveOperatorData(nil, od) + require.NoError(t, err) + require.False(t, found) + + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, operatorID, bytes.Repeat([]byte{1}, 256)) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.ErrorContains(t, err, ErrRSADecryption.Error()) + + require.NoError(t, ns.DeleteOperatorData(nil, operatorID)) + }) + }) } diff --git a/network/commons/common.go b/network/commons/common.go index e9de6ccfb3..b32bf2da4d 100644 --- a/network/commons/common.go +++ b/network/commons/common.go @@ -31,6 +31,35 @@ const ( topicPrefix = "ssv.v2" ) +const ( + signatureSize = 256 + signatureOffset = 0 + operatorIDSize = 8 + operatorIDOffset = signatureOffset + signatureSize + messageOffset = operatorIDOffset + operatorIDSize +) + +// EncodeSignedSSVMessage serializes the message, op id and signature into bytes +func EncodeSignedSSVMessage(message []byte, operatorID spectypes.OperatorID, signature []byte) []byte { + b := make([]byte, signatureSize+operatorIDSize+len(message)) + copy(b[signatureOffset:], signature) + binary.LittleEndian.PutUint64(b[operatorIDOffset:], operatorID) + copy(b[messageOffset:], message) + return b +} + +// DecodeSignedSSVMessage deserializes signed message bytes into the message, op id and signature +func DecodeSignedSSVMessage(encoded []byte) ([]byte, spectypes.OperatorID, []byte, error) { + if len(encoded) < messageOffset { + return nil, 0, nil, fmt.Errorf("unexpected encoded message size of %d", len(encoded)) + } + + message := encoded[messageOffset:] + operatorID := binary.LittleEndian.Uint64(encoded[operatorIDOffset : operatorIDOffset+operatorIDSize]) + signature := encoded[signatureOffset : signatureOffset+signatureSize] + return message, operatorID, signature, nil +} + // SubnetTopicID returns the topic to use for the given 
subnet func SubnetTopicID(subnet int) string { if subnet < 0 { @@ -74,6 +103,7 @@ func MsgID() MsgIDFunc { if len(msg) == 0 { return "" } + b := make([]byte, 12) binary.LittleEndian.PutUint64(b, xxhash.Sum64(msg)) return string(b) diff --git a/network/commons/keys.go b/network/commons/keys.go index b898e642c8..4525de49fe 100644 --- a/network/commons/keys.go +++ b/network/commons/keys.go @@ -3,6 +3,8 @@ package commons import ( "crypto/ecdsa" crand "crypto/rand" + "crypto/rsa" + "crypto/x509" "math/big" "github.com/btcsuite/btcd/btcec/v2" @@ -12,8 +14,8 @@ import ( "github.com/pkg/errors" ) -// ConvertFromInterfacePrivKey converts crypto.PrivKey back to ecdsa.PrivateKey -func ConvertFromInterfacePrivKey(privkey crypto.PrivKey) (*ecdsa.PrivateKey, error) { +// ECDSAPrivFromInterface converts crypto.PrivKey back to ecdsa.PrivateKey +func ECDSAPrivFromInterface(privkey crypto.PrivKey) (*ecdsa.PrivateKey, error) { secpKey := (privkey.(*crypto.Secp256k1PrivateKey)) rawKey, err := secpKey.Raw() if err != nil { @@ -27,8 +29,8 @@ func ConvertFromInterfacePrivKey(privkey crypto.PrivKey) (*ecdsa.PrivateKey, err return privKey, nil } -// ConvertToInterfacePrivkey converts ecdsa.PrivateKey to crypto.PrivKey -func ConvertToInterfacePrivkey(privkey *ecdsa.PrivateKey) (crypto.PrivKey, error) { +// ECDSAPrivToInterface converts ecdsa.PrivateKey to crypto.PrivKey +func ECDSAPrivToInterface(privkey *ecdsa.PrivateKey) (crypto.PrivKey, error) { privBytes := privkey.D.Bytes() // In the event the number of bytes outputted by the big-int are less than 32, // we append bytes to the start of the sequence for the missing most significant @@ -39,14 +41,14 @@ func ConvertToInterfacePrivkey(privkey *ecdsa.PrivateKey) (crypto.PrivKey, error return crypto.UnmarshalSecp256k1PrivateKey(privBytes) } -// ConvertFromInterfacePubKey converts crypto.PubKey to ecdsa.PublicKey -func ConvertFromInterfacePubKey(pubKey crypto.PubKey) *ecdsa.PublicKey { +// ECDSAPubFromInterface converts crypto.PubKey to ecdsa.PublicKey +func ECDSAPubFromInterface(pubKey crypto.PubKey) *ecdsa.PublicKey { pk := btcec.PublicKey(*(pubKey.(*crypto.Secp256k1PublicKey))) return pk.ToECDSA() } -// ConvertToInterfacePubkey converts ecdsa.PublicKey to crypto.PubKey -func ConvertToInterfacePubkey(pubkey *ecdsa.PublicKey) (crypto.PubKey, error) { +// ECDSAPubToInterface converts ecdsa.PublicKey to crypto.PubKey +func ECDSAPubToInterface(pubkey *ecdsa.PublicKey) (crypto.PubKey, error) { xVal, yVal := new(btcec.FieldVal), new(btcec.FieldVal) if xVal.SetByteSlice(pubkey.X.Bytes()) { return nil, errors.Errorf("X value overflows") @@ -61,11 +63,17 @@ func ConvertToInterfacePubkey(pubkey *ecdsa.PublicKey) (crypto.PubKey, error) { return newKey, nil } +// RSAPrivToInterface converts ecdsa.PrivateKey to crypto.PrivKey +func RSAPrivToInterface(privkey *rsa.PrivateKey) (crypto.PrivKey, error) { + rsaPrivDER := x509.MarshalPKCS1PrivateKey(privkey) + return crypto.UnmarshalRsaPrivateKey(rsaPrivDER) +} + // GenNetworkKey generates a new network key func GenNetworkKey() (*ecdsa.PrivateKey, error) { privInterfaceKey, _, err := crypto.GenerateSecp256k1Key(crand.Reader) if err != nil { return nil, errors.WithMessage(err, "could not generate 256k1 key") } - return ConvertFromInterfacePrivKey(privInterfaceKey) + return ECDSAPrivFromInterface(privInterfaceKey) } diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index 91ce4e340a..98312675ce 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -107,7 +107,7 @@ func 
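A note on how the new RSAPrivToInterface helper above is meant to be used: it bridges the operator's RSA key into libp2p's crypto.PrivKey interface via PKCS#1 DER, from which a libp2p peer identity can be derived. A minimal sketch for illustration only; it assumes the commons package above and the standard libp2p peer package, and it generates a throwaway key rather than loading a real operator key:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/bloxapp/ssv/network/commons"
)

func main() {
	// A throwaway RSA key stands in for the operator's real key here.
	operatorKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// RSAPrivToInterface marshals the key to PKCS#1 DER and re-parses it
	// as a libp2p crypto.PrivKey, so it can back a libp2p identity.
	libp2pKey, err := commons.RSAPrivToInterface(operatorKey)
	if err != nil {
		panic(err)
	}

	// A peer ID can then be derived from the interface key.
	pid, err := peer.IDFromPrivateKey(libp2pKey)
	if err != nil {
		panic(err)
	}
	fmt.Println("derived peer id:", pid)
}

diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index 91ce4e340a..98312675ce 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -107,7 +107,7 @@ func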
(dvs *DiscV5Service) Node(logger *zap.Logger, info peer.AddrInfo) (*enode.N if err != nil { return nil, err } - pk := commons.ConvertFromInterfacePubKey(pki) + pk := commons.ECDSAPubFromInterface(pki) id := enode.PubkeyToIDV4(pk) logger = logger.With(zap.String("info", info.String()), zap.String("enode.ID", id.String())) diff --git a/network/discovery/enode.go b/network/discovery/enode.go index 351aed40cd..0027ed4cbc 100644 --- a/network/discovery/enode.go +++ b/network/discovery/enode.go @@ -6,12 +6,13 @@ import ( "fmt" "net" - "github.com/bloxapp/ssv/network/commons" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" + + "github.com/bloxapp/ssv/network/commons" ) // createLocalNode create a new enode.LocalNode instance @@ -69,7 +70,7 @@ func ToPeer(node *enode.Node) (*peer.AddrInfo, error) { // PeerID returns the peer id of the node func PeerID(node *enode.Node) (peer.ID, error) { - pk, err := commons.ConvertToInterfacePubkey(node.Pubkey()) + pk, err := commons.ECDSAPubToInterface(node.Pubkey()) if err != nil { return "", err } diff --git a/network/discovery/enode_test.go b/network/discovery/enode_test.go index 7f7c9a12cb..bcf273548c 100644 --- a/network/discovery/enode_test.go +++ b/network/discovery/enode_test.go @@ -43,7 +43,7 @@ func Test_ParseENR(t *testing.T) { func localNodeMock(t *testing.T) *enode.LocalNode { sk, _, err := crypto.GenerateSecp256k1Key(crand.Reader) require.NoError(t, err) - pk, err := commons.ConvertFromInterfacePrivKey(sk) + pk, err := commons.ECDSAPrivFromInterface(sk) require.NoError(t, err) ip, err := commons.IPAddr() require.NoError(t, err) diff --git a/network/p2p/config.go b/network/p2p/config.go index 935eaa4c2a..f30a44ae24 100644 --- a/network/p2p/config.go +++ b/network/p2p/config.go @@ -3,10 +3,12 @@ package p2pv1 import ( "context" "crypto/ecdsa" + "crypto/rsa" "fmt" "strings" "time" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/p2p/security/noise" libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp" @@ -54,8 +56,12 @@ type Config struct { DiscoveryTrace bool `yaml:"DiscoveryTrace" env:"DISCOVERY_TRACE" env-description:"Flag to turn on/off discovery tracing in logs"` // NetworkPrivateKey is used for network identity, MUST be injected NetworkPrivateKey *ecdsa.PrivateKey - // OperatorPublicKey is used for operator identity, optional - OperatorID string + // OperatorPrivateKey is used for operator identity, MUST be injected + OperatorPrivateKey *rsa.PrivateKey + // OperatorPubKeyHash is hash of operator public key, used for identity, optional + OperatorPubKeyHash string + // OperatorID contains numeric operator ID + OperatorID spectypes.OperatorID // Router propagate incoming network messages to the responsive components Router network.MessageRouter // UserAgent to use by libp2p identify protocol @@ -95,7 +101,7 @@ func (c *Config) Libp2pOptions(logger *zap.Logger) ([]libp2p.Option, error) { if c.NetworkPrivateKey == nil { return nil, errors.New("could not create options w/o network key") } - sk, err := commons.ConvertToInterfacePrivkey(c.NetworkPrivateKey) + sk, err := commons.ECDSAPrivToInterface(c.NetworkPrivateKey) if err != nil { return nil, errors.Wrap(err, "could not convert to interface priv key") } diff --git a/network/p2p/metrics.go b/network/p2p/metrics.go index 9792d394a1..10ba41304e 100644 --- a/network/p2p/metrics.go +++ 
b/network/p2p/metrics.go @@ -105,8 +105,8 @@ func (n *p2pNetwork) reportPeerIdentity(logger *zap.Logger, pid peer.ID) { } } - if pubKey, ok := n.operatorPKCache.Load(opPKHash); ok { - operatorData, found, opDataErr := n.nodeStorage.GetOperatorDataByPubKey(nil, pubKey.([]byte)) + if pubKey, ok := n.operatorPKHashToPKCache.Get(opPKHash); ok { + operatorData, found, opDataErr := n.nodeStorage.GetOperatorDataByPubKey(nil, pubKey) if opDataErr == nil && found { opID = strconv.FormatUint(operatorData.ID, 10) } @@ -118,7 +118,7 @@ func (n *p2pNetwork) reportPeerIdentity(logger *zap.Logger, pid peer.ID) { for _, operator := range operators { pubKeyHash := format.OperatorID(operator.PublicKey) - n.operatorPKCache.Store(pubKeyHash, operator.PublicKey) + n.operatorPKHashToPKCache.Set(pubKeyHash, operator.PublicKey) if pubKeyHash == opPKHash { opID = strconv.FormatUint(operator.ID, 10) } diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 768d583042..8e455d3701 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -2,10 +2,11 @@ package p2pv1 import ( "context" - "sync" + "crypto/rsa" "sync/atomic" "time" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" connmgrcore "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/host" @@ -71,8 +72,11 @@ type p2pNetwork struct { backoffConnector *libp2pdiscbackoff.BackoffConnector subnets []byte libConnManager connmgrcore.ConnManager - nodeStorage operatorstorage.Storage - operatorPKCache sync.Map + + nodeStorage operatorstorage.Storage + operatorPKHashToPKCache *hashmap.Map[string, []byte] // used for metrics + operatorPrivateKey *rsa.PrivateKey + operatorID spectypes.OperatorID } // New creates a new p2p network @@ -82,17 +86,19 @@ func New(logger *zap.Logger, cfg *Config) network.P2PNetwork { logger = logger.Named(logging.NameP2PNetwork) return &p2pNetwork{ - parentCtx: cfg.Ctx, - ctx: ctx, - cancel: cancel, - interfaceLogger: logger, - cfg: cfg, - msgRouter: cfg.Router, - msgValidator: cfg.MessageValidator, - state: stateClosed, - activeValidators: hashmap.New[string, validatorStatus](), - nodeStorage: cfg.NodeStorage, - operatorPKCache: sync.Map{}, + parentCtx: cfg.Ctx, + ctx: ctx, + cancel: cancel, + interfaceLogger: logger, + cfg: cfg, + msgRouter: cfg.Router, + msgValidator: cfg.MessageValidator, + state: stateClosed, + activeValidators: hashmap.New[string, validatorStatus](), + nodeStorage: cfg.NodeStorage, + operatorPKHashToPKCache: hashmap.New[string, []byte](), + operatorPrivateKey: cfg.OperatorPrivateKey, + operatorID: cfg.OperatorID, } } diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go index 46493376f8..233a4189d1 100644 --- a/network/p2p/p2p_pubsub.go +++ b/network/p2p/p2p_pubsub.go @@ -2,8 +2,12 @@ package p2pv1 import ( "context" + "crypto" + "crypto/rsa" + "crypto/sha256" "encoding/hex" "fmt" + "github.com/bloxapp/ssv/protocol/v2/message" spectypes "github.com/bloxapp/ssv-spec/types" @@ -53,16 +57,27 @@ func (n *p2pNetwork) Broadcast(msg *spectypes.SSVMessage) error { return p2pprotocol.ErrNetworkIsNotReady } - raw, err := commons.EncodeNetworkMsg(msg) + encodedMsg, err := commons.EncodeNetworkMsg(msg) if err != nil { return errors.Wrap(err, "could not decode msg") } + if n.cfg.Network.Beacon.EstimatedCurrentEpoch() > n.cfg.Network.RSAForkEpoch { + hash := sha256.Sum256(encodedMsg) + + signature, err := rsa.SignPKCS1v15(nil, n.operatorPrivateKey, crypto.SHA256, hash[:]) + if err != nil { + return err + } + + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, 
n.operatorID, signature) + } + vpk := msg.GetID().GetPubKey() topics := commons.ValidatorTopicID(vpk) for _, topic := range topics { - if err := n.topicsCtrl.Broadcast(topic, raw, n.cfg.RequestTimeout); err != nil { + if err := n.topicsCtrl.Broadcast(topic, encodedMsg, n.cfg.RequestTimeout); err != nil { n.interfaceLogger.Debug("could not broadcast msg", fields.PubKey(vpk), zap.Error(err)) return errors.Wrap(err, "could not broadcast msg") } diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index 7a73901935..c4c3e546d0 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -161,7 +161,7 @@ func (n *p2pNetwork) setupStreamCtrl(logger *zap.Logger) error { } func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { - libPrivKey, err := p2pcommons.ConvertToInterfacePrivkey(n.cfg.NetworkPrivateKey) + libPrivKey, err := p2pcommons.ECDSAPrivToInterface(n.cfg.NetworkPrivateKey) if err != nil { return err } @@ -169,7 +169,7 @@ func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { domain := "0x" + hex.EncodeToString(n.cfg.Network.Domain[:]) self := records.NewNodeInfo(domain) self.Metadata = &records.NodeMetadata{ - OperatorID: n.cfg.OperatorID, + OperatorID: n.cfg.OperatorPubKeyHash, NodeVersion: commons.GetNodeVersion(), Subnets: records.Subnets(n.subnets).String(), } @@ -294,7 +294,7 @@ func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { cfg.ScoreIndex = nil } - midHandler := topics.NewMsgIDHandler(n.ctx, time.Minute*2) + midHandler := topics.NewMsgIDHandler(n.ctx, time.Minute*2, n.cfg.Network) n.msgResolver = midHandler cfg.MsgIDHandler = midHandler go cfg.MsgIDHandler.Start() diff --git a/network/p2p/p2p_sync.go b/network/p2p/p2p_sync.go index 74ac3a4e14..1dd4fbd5ec 100644 --- a/network/p2p/p2p_sync.go +++ b/network/p2p/p2p_sync.go @@ -57,21 +57,26 @@ func (n *p2pNetwork) handleStream(logger *zap.Logger, handler p2pprotocol.Reques if err != nil { return errors.Wrap(err, "could not handle stream") } + smsg, err := commons.DecodeNetworkMsg(req) if err != nil { return errors.Wrap(err, "could not decode msg from stream") } + result, err := handler(smsg) if err != nil { return errors.Wrap(err, "could not handle msg from stream") } + resultBytes, err := commons.EncodeNetworkMsg(result) if err != nil { return errors.Wrap(err, "could not encode msg") } + if err := respond(resultBytes); err != nil { return errors.Wrap(err, "could not respond to stream") } + return nil } } @@ -114,6 +119,7 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp if err != nil { return nil, errors.Wrap(err, "could not encode sync message") } + msg := &spectypes.SSVMessage{ MsgType: message.SSVSyncMsgType, MsgID: mid, @@ -123,11 +129,13 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp if err != nil { return nil, err } + logger = logger.With(zap.String("protocol", string(protocol))) msgID := commons.MsgID() distinct := make(map[string]struct{}) for _, pid := range peers { logger := logger.With(fields.PeerID(pid)) + raw, err := n.streamCtrl.Request(logger, pid, protocol, encoded) if err != nil { // TODO: is this how to check for ErrNotSupported? 
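The envelope that this sync path unwraps after the fork is the same one Broadcast produces: a 256-byte PKCS#1 v1.5 signature, followed by an 8-byte little-endian operator ID, followed by the encoded message. A minimal encode/decode/verify round trip through the new commons helpers, shown as a sketch for illustration; it uses a freshly generated key in place of a real operator key:

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"

	spectypes "github.com/bloxapp/ssv-spec/types"

	"github.com/bloxapp/ssv/network/commons"
)

func main() {
	payload := []byte("encoded network message")

	// A 2048-bit key is what yields the 256-byte signature the layout expects.
	sk, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Sign the SHA-256 digest of the payload, as Broadcast does after the fork epoch.
	digest := sha256.Sum256(payload)
	sig, err := rsa.SignPKCS1v15(rand.Reader, sk, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}

	// Envelope layout: [0:256) signature | [256:264) operator ID (little-endian) | [264:) payload.
	const operatorID = spectypes.OperatorID(42)
	envelope := commons.EncodeSignedSSVMessage(payload, operatorID, sig)

	msg, decodedOperatorID, decodedSig, err := commons.DecodeSignedSSVMessage(envelope)
	if err != nil {
		panic(err)
	}

	// The receiver re-hashes the payload and verifies against the operator's public key.
	digest = sha256.Sum256(msg)
	if err := rsa.VerifyPKCS1v15(&sk.PublicKey, crypto.SHA256, digest[:], decodedSig); err != nil {
		panic(err)
	}
	fmt.Println("verified message from operator", decodedOperatorID)
}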
@@ -137,16 +145,28 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp } continue } + + if n.cfg.Network.Beacon.EstimatedCurrentEpoch() > n.cfg.Network.RSAForkEpoch { + decodedMsg, _, _, err := commons.DecodeSignedSSVMessage(raw) + if err != nil { + logger.Debug("could not decode signed SSV message", zap.Error(err)) + } else { + raw = decodedMsg + } + } + mid := msgID(raw) if _, ok := distinct[mid]; ok { continue } distinct[mid] = struct{}{} + res, err := commons.DecodeNetworkMsg(raw) if err != nil { logger.Debug("could not decode stream response", zap.Error(err)) continue } + results = append(results, p2pprotocol.SyncResult{ Msg: res, Sender: pid.String(), diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index d2152c049e..f7275dc094 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -2,7 +2,13 @@ package p2pv1 import ( "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" "encoding/hex" + "encoding/pem" "sync" "sync/atomic" "testing" @@ -25,6 +32,51 @@ import ( p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" ) +func TestRSAUsage(t *testing.T) { + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + + testMessage := []byte("message") + + hash := sha256.Sum256(testMessage) + + signature, err := rsa.SignPKCS1v15(nil, privateKey, crypto.SHA256, hash[:]) + require.NoError(t, err) + + publicKey := &privateKey.PublicKey + + pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) + require.NoError(t, err) + + pubPEM := pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: pubKeyBytes, + }) + + const operatorID = spectypes.OperatorID(0x12345678) + encodedSignedSSVMessage := commons.EncodeSignedSSVMessage(testMessage, operatorID, signature) + + decodedMessage, decodedOperatorID, decodedSignature, err := commons.DecodeSignedSSVMessage(encodedSignedSSVMessage) + require.NoError(t, err) + require.Equal(t, operatorID, decodedOperatorID) + require.Equal(t, signature, decodedSignature) + + messageHash := sha256.Sum256(decodedMessage) + + block, rest := pem.Decode(pubPEM) + require.NotNil(t, block) + require.Empty(t, rest, "extra data after PEM decoding") + + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + require.NoError(t, err) + + rsaPubKey, ok := pub.(*rsa.PublicKey) + require.True(t, ok) + + require.NoError(t, rsa.VerifyPKCS1v15(rsaPubKey, crypto.SHA256, messageHash[:], decodedSignature)) + require.Equal(t, testMessage, decodedMessage) +} + func TestGetMaxPeers(t *testing.T) { n := &p2pNetwork{ cfg: &Config{MaxPeers: 40, TopicMaxPeers: 8}, diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go index 70e862aaa7..63706eba25 100644 --- a/network/p2p/test_utils.go +++ b/network/p2p/test_utils.go @@ -2,7 +2,6 @@ package p2pv1 import ( "context" - "crypto/ecdsa" "encoding/hex" "fmt" "time" @@ -49,7 +48,7 @@ func (ln *LocalNet) WithBootnode(ctx context.Context, logger *zap.Logger) error if err != nil { return err } - isk, err := commons.ConvertToInterfacePrivkey(bnSk) + isk, err := commons.ECDSAPrivToInterface(bnSk) if err != nil { return err } @@ -131,7 +130,7 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys if err != nil { return nil, err } - cfg := NewNetConfig(keys.NetKey, format.OperatorID([]byte(operatorPubkey)), ln.Bootnode, testing.RandomTCPPort(12001, 12999), ln.udpRand.Next(13001, 13999), maxPeers) + cfg := NewNetConfig(keys,
format.OperatorID([]byte(operatorPubkey)), ln.Bootnode, testing.RandomTCPPort(12001, 12999), ln.udpRand.Next(13001, 13999), maxPeers) cfg.Ctx = ctx cfg.Subnets = "00000000000000000000020000000000" //PAY ATTENTION for future test scenarios which use more than one eth-validator we need to make this field dynamically changing cfg.NodeStorage = mock.NodeStorage{ @@ -139,6 +138,7 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys RegisteredOperatorPublicKeyPEMs: []string{}, } cfg.MessageValidator = validation.NewMessageValidator(networkconfig.TestNetwork) + cfg.Network = networkconfig.TestNetwork p := New(logger, cfg) err = p.Setup(logger) @@ -177,7 +177,7 @@ func NewLocalNet(ctx context.Context, logger *zap.Logger, n int, useDiscv5 bool) } // NewNetConfig creates a new config for tests -func NewNetConfig(netPrivKey *ecdsa.PrivateKey, operatorID string, bn *discovery.Bootnode, tcpPort, udpPort, maxPeers int) *Config { +func NewNetConfig(keys testing.NodeKeys, operatorPubKeyHash string, bn *discovery.Bootnode, tcpPort, udpPort, maxPeers int) *Config { bns := "" discT := "discv5" if bn != nil { @@ -187,19 +187,20 @@ func NewNetConfig(netPrivKey *ecdsa.PrivateKey, operatorID string, bn *discovery } ua := "" return &Config{ - Bootnodes: bns, - TCPPort: tcpPort, - UDPPort: udpPort, - HostAddress: "", - HostDNS: "", - RequestTimeout: 10 * time.Second, - MaxBatchResponse: 25, - MaxPeers: maxPeers, - PubSubTrace: false, - NetworkPrivateKey: netPrivKey, - OperatorID: operatorID, - UserAgent: ua, - Discovery: discT, + Bootnodes: bns, + TCPPort: tcpPort, + UDPPort: udpPort, + HostAddress: "", + HostDNS: "", + RequestTimeout: 10 * time.Second, + MaxBatchResponse: 25, + MaxPeers: maxPeers, + PubSubTrace: false, + NetworkPrivateKey: keys.NetKey, + OperatorPrivateKey: keys.OperatorKey, + OperatorPubKeyHash: operatorPubKeyHash, + UserAgent: ua, + Discovery: discT, Permissioned: func() bool { return false }, diff --git a/network/peers/index.go b/network/peers/index.go index 8e8cab40b8..35686ee2c7 100644 --- a/network/peers/index.go +++ b/network/peers/index.go @@ -4,13 +4,14 @@ import ( "crypto/rsa" "io" - "github.com/bloxapp/ssv/network/records" "github.com/libp2p/go-libp2p/core/network" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/network/records" ) const ( @@ -71,7 +72,7 @@ type NodeInfoIndex interface { NodeInfo(id peer.ID) *records.NodeInfo } -// InfoIndex is an interface for managing PeerInfo of network peers +// PeerInfoIndex is an interface for managing PeerInfo of network peers type PeerInfoIndex interface { // PeerInfo returns the PeerInfo of the given peer, or nil if not found. 
PeerInfo(peer.ID) *PeerInfo diff --git a/network/peers/scores_test.go b/network/peers/scores_test.go index d0178d45a6..763be7c974 100644 --- a/network/peers/scores_test.go +++ b/network/peers/scores_test.go @@ -15,7 +15,7 @@ func TestScoresIndex(t *testing.T) { nks, err := nettesting.CreateKeys(1) require.NoError(t, err) - sk, err := commons.ConvertToInterfacePrivkey(nks[0].NetKey) + sk, err := commons.ECDSAPrivToInterface(nks[0].NetKey) require.NoError(t, err) pid, err := peer.IDFromPrivateKey(sk) require.NoError(t, err) diff --git a/network/peers/subnets_test.go b/network/peers/subnets_test.go index b28d62e04e..5679ad71d9 100644 --- a/network/peers/subnets_test.go +++ b/network/peers/subnets_test.go @@ -19,7 +19,7 @@ func TestSubnetsIndex(t *testing.T) { var pids []peer.ID for _, nk := range nks { - sk, err := commons.ConvertToInterfacePrivkey(nk.NetKey) + sk, err := commons.ECDSAPrivToInterface(nk.NetKey) require.NoError(t, err) pid, err := peer.IDFromPrivateKey(sk) require.NoError(t, err) diff --git a/network/records/subnets_test.go b/network/records/subnets_test.go index b09dd12fd0..47e35f88ab 100644 --- a/network/records/subnets_test.go +++ b/network/records/subnets_test.go @@ -15,7 +15,7 @@ func Test_SubnetsEntry(t *testing.T) { SubnetsCount := 128 priv, _, err := crypto.GenerateSecp256k1Key(crand.Reader) require.NoError(t, err) - sk, err := commons.ConvertFromInterfacePrivKey(priv) + sk, err := commons.ECDSAPrivFromInterface(priv) require.NoError(t, err) ip, err := commons.IPAddr() require.NoError(t, err) diff --git a/network/topics/controller.go b/network/topics/controller.go index bbc9e3f821..8e58a34589 100644 --- a/network/topics/controller.go +++ b/network/topics/controller.go @@ -6,13 +6,13 @@ import ( "strconv" "time" - spectypes "github.com/bloxapp/ssv-spec/types" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" "go.uber.org/zap" "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) var ( @@ -240,7 +240,7 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err continue } - if ssvMsg, ok := msg.ValidatorData.(spectypes.SSVMessage); ok { + if ssvMsg, ok := msg.ValidatorData.(*queue.DecodedSSVMessage); ok { metricPubsubInbound.WithLabelValues( commons.GetTopicBaseName(topicName), strconv.FormatUint(uint64(ssvMsg.MsgType), 10), diff --git a/network/topics/controller_test.go b/network/topics/controller_test.go index 4a09584cfb..c48ff4564d 100644 --- a/network/topics/controller_test.go +++ b/network/topics/controller_test.go @@ -348,7 +348,7 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator var p *P var midHandler MsgIDHandler if msgID { - midHandler = NewMsgIDHandler(ctx, 2*time.Minute) + midHandler = NewMsgIDHandler(ctx, 2*time.Minute, networkconfig.TestNetwork) go midHandler.Start() } cfg := &PubSubConfig{ diff --git a/network/topics/msg_id.go b/network/topics/msg_id.go index 92af89b1d0..b5d1efb777 100644 --- a/network/topics/msg_id.go +++ b/network/topics/msg_id.go @@ -8,6 +8,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" ps_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" @@ -53,21 +54,23 @@ type msgIDEntry struct { // msgIDHandler implements MsgIDHandler type msgIDHandler struct { - ctx context.Context - added chan addedEvent - ids map[string]*msgIDEntry - locker sync.Locker - ttl time.Duration + 
ctx context.Context + added chan addedEvent + ids map[string]*msgIDEntry + locker sync.Locker + ttl time.Duration + networkConfig networkconfig.NetworkConfig } // NewMsgIDHandler creates a new MsgIDHandler -func NewMsgIDHandler(ctx context.Context, ttl time.Duration) MsgIDHandler { +func NewMsgIDHandler(ctx context.Context, ttl time.Duration, networkConfig networkconfig.NetworkConfig) MsgIDHandler { handler := &msgIDHandler{ - ctx: ctx, - added: make(chan addedEvent, msgIDHandlerBufferSize), - ids: make(map[string]*msgIDEntry), - locker: &sync.Mutex{}, - ttl: ttl, + ctx: ctx, + added: make(chan addedEvent, msgIDHandlerBufferSize), + ids: make(map[string]*msgIDEntry), + locker: &sync.Mutex{}, + ttl: ttl, + networkConfig: networkConfig, } return handler } @@ -96,31 +99,51 @@ func (handler *msgIDHandler) MsgID(logger *zap.Logger) func(pmsg *ps_pb.Message) if pmsg == nil { return MsgIDEmptyMessage } - //logger := logger.With() - if len(pmsg.GetData()) == 0 { + + messageData := pmsg.GetData() + if len(messageData) == 0 { return MsgIDEmptyMessage } + pid, err := peer.IDFromBytes(pmsg.GetFrom()) if err != nil { return MsgIDBadPeerID } - mid := commons.MsgID()(pmsg.GetData()) + + mid := handler.pubsubMsgToMsgID(messageData) + if len(mid) == 0 { logger.Debug("could not create msg_id", zap.ByteString("seq_no", pmsg.GetSeqno()), fields.PeerID(pid)) return MsgIDError } + handler.Add(mid, pid) return mid } } +func (handler *msgIDHandler) pubsubMsgToMsgID(msg []byte) string { + currentEpoch := handler.networkConfig.Beacon.EstimatedCurrentEpoch() + if currentEpoch > handler.networkConfig.RSAForkEpoch { + decodedMsg, _, _, err := commons.DecodeSignedSSVMessage(msg) + if err != nil { + // todo: should err here or just log and let the decode function err? + } else { + return commons.MsgID()(decodedMsg) + } + } + return commons.MsgID()(msg) +} + // GetPeers returns the peers that are related to the given msg func (handler *msgIDHandler) GetPeers(msg []byte) []peer.ID { - msgID := commons.MsgID()(msg) + msgID := handler.pubsubMsgToMsgID(msg) + handler.locker.Lock() defer handler.locker.Unlock() + entry, ok := handler.ids[msgID] if ok { if !entry.t.Add(handler.ttl).After(time.Now()) { diff --git a/network/topics/msg_validator_test.go b/network/topics/msg_validator_test.go index dd66fb8312..dc24227ccd 100644 --- a/network/topics/msg_validator_test.go +++ b/network/topics/msg_validator_test.go @@ -43,7 +43,7 @@ func TestMsgValidator(t *testing.T) { } require.NoError(t, ns.Shares().Save(nil, share)) - mv := validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithShareStorage(ns.Shares())) + mv := validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithNodeStorage(ns)) require.NotNil(t, mv) slot := networkconfig.TestNetwork.Beacon.GetBeaconNetwork().EstimatedCurrentSlot() diff --git a/networkconfig/config.go b/networkconfig/config.go index d2981c2c41..3499c04983 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -38,6 +38,7 @@ type NetworkConfig struct { RegistryContractAddr string // TODO: ethcommon.Address Bootnodes []string WhitelistedOperatorKeys []string + RSAForkEpoch spec.Epoch } func (n NetworkConfig) String() string { diff --git a/networkconfig/holesky-stage.go b/networkconfig/holesky-stage.go index c3e9d1aa8a..bbecaef474 100644 --- a/networkconfig/holesky-stage.go +++ b/networkconfig/holesky-stage.go @@ -19,4 +19,5 @@ var HoleskyStage = NetworkConfig{ 
"enr:-Li4QNUN0RdeoHjI4Np18-PX1VXrJ2rJMo2OarRz0wCAxiYlD3s_E4zsmXi1LHv62ULLBT-AQfZIjYefEoEsMDkaEKCGAYtCguORh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhKfrtCyJc2VjcDI1NmsxoQP2e508AoA0B-KH-IaAd3nVCfI9q16lNztV-oTpcH72tIN0Y3CCE4mDdWRwgg-h", }, WhitelistedOperatorKeys: []string{}, + RSAForkEpoch: 0, } diff --git a/networkconfig/jato-v2.go b/networkconfig/jato-v2.go index 36f2775b61..3e0cfe44c3 100644 --- a/networkconfig/jato-v2.go +++ b/networkconfig/jato-v2.go @@ -33,4 +33,5 @@ var JatoV2 = NetworkConfig{ "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNmkwelNHRzFiaHlPZU8xVDVxc2UKOFpHbElBQ2pmemVYQzhpYVVReGVCb0dlVGRvN0tqalkwNy80b3hBNkhjdG45bEtxd1BodG5ISXIvZ1RlWXNYUwp5QVhPL1Q5K2RQcng1ZEp3SEVCdm5BcmNSQkNzaGF5Sng2S0xiZ3RJb2dGSWhkK1ptaFpiWFpWZVp5THhzK2tZCnM4djVwcHBIbWNwWHRwUVAxWm1ycndpTC9hZU5JNzczbUlrZ1pBOGdNK2Z5S2RtTGJrQXdXZWh1SXZKRmpuVCsKQlVkUHUzWGJIemU2SlJnY2NYNmZnM1gwOTJibG9VMzRxY1VIelNhWU9TZlc2TUpEbFgzQzJCeFhCZ042VFV0aQpDN2k2ZE9qaW14RzlSMkp4ZHVhZGpUeEM1MHl5OE9IVWpMVGNkc2pWRjdYNXdGUzFqaDI5aFpDY0FoeDB2NDg3CjdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNldITnNBdTdSYnMxM0I2c0taWXgKVnZuMldlTy9YMTdSeUx1MjA0K2VtbjkvSGhIRlhXT29CMGczekNZQWp2WWdsbFJka0laTWt3ZkFUNGZvVjVTKwpvNzFFQ1dFN1ZuaytxcWd0U3k5M0ZTTVJzUG9vNngrTUd4ZURBQ3RQbDdQV1EyTXJmV1hkNzVwV1p5TVd5VndHCktPbFo0RHhoQ0VOcXlRcndlOTkybU9wVDZBcTJ1TmVsUmdESUJDSW1CV01NcUl2aXdhSU96MlBmTWR1L3ZVTWgKcVFuNGJJZjFpcVk2WGlKU1g2bDJvUWlTb09VMjRvNkFCdHlHbzRpTDJXN2tOajVUa1hOOEVzeGc3WmUveVQ0YgpKNGtvVjdmNUE3dmpMbHc1ZkdjWDR1bTBNK1QwbnczUlVIY3pHK1E3U1VGMTFGU3c0VnM1WVBHWC84a2tzdXgyCkx3SURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, + RSAForkEpoch: 0, // todo: decide forking epoch } diff --git a/networkconfig/local-testnet.go b/networkconfig/local-testnet.go index 09e829453f..f96cbbaeea 100644 --- a/networkconfig/local-testnet.go +++ b/networkconfig/local-testnet.go @@ -1,6 +1,8 @@ package networkconfig import ( + "math" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" ) @@ -14,4 +16,5 @@ var LocalTestnet = NetworkConfig{ Bootnodes: []string{ "enr:-Li4QLR4Y1VbwiqFYKy6m-WFHRNDjhMDZ_qJwIABu2PY9BHjIYwCKpTvvkVmZhu43Q6zVA29sEUhtz10rQjDJkK3Hd-GAYiGrW2Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQJTcI7GHPw-ZqIflPZYYDK_guurp_gsAFF5Erns3-PAvIN0Y3CCE4mDdWRwgg-h", }, + RSAForkEpoch: math.MaxUint64, } diff --git a/networkconfig/mainnet.go b/networkconfig/mainnet.go index 2326420cde..1412f54ae3 100644 --- a/networkconfig/mainnet.go +++ b/networkconfig/mainnet.go @@ -1,6 +1,7 @@ package networkconfig import ( + "math" "math/big" spectypes "github.com/bloxapp/ssv-spec/types" @@ -40,4 +41,5 @@ var Mainnet = NetworkConfig{ "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcU5Sd0xWSHNWMEloUjdjdUJkb1AKZnVwNTkydEJFSG0vRllDREZQbERMR2NVZ2NzZ29PdHBsV2hMRjBGSzIwQ3ppVi83WVZzcWpxcDh3VDExM3pBbQoxOTZZRlN6WmUzTFhOQXFRWlBwbDlpOVJxdVJJMGlBT2xiWUp0ampJRjd2ZVZLbVdybzMwWTZDV3JPcHpVQ1BPClRGVEpGZ0hvZmtQT2pabmprNURtdDg2ZURveUxzenJQZWQ0LzlyR2NNVUp4WnJBSjEvbFR1ajNaWWVJUk0wS04KUVQ0eitPb3p0T0dBeDVVcUk2THpQL3NGOWRJM3BzM3BIb3dXOWF2RHp3Qm94Y3hWam14NWhRMXowOTN4MnlkYgpWcjgxNDgzTzdqUkt6eFpXeEduOFJzZUROZkxwSi93VFJiQ0lVOFhwUC9IKzd6TWNGMG1HbVlUcjAvcWR1bVNsCjNRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", 
"LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdmRWVVJ0OFgxbFA5VDVSUUdYdVkKcFpZWjVBb3VuSEdUakMvQ1FoTmQ5RC9kT2kvSDUwVW1PdVBpTzhYYUF4UFRGcGIrZ2xCeGJRRHVQUGN1cENPdQpKN09lVTBvdzdsQjVMclZlWWt3RExnSHY3bDQwcjRWVTM3NlFueGhuS0JyVHNkaWdmZHJYUWZveGRhajVQQ0VYCnFjK1ozNXFPUmpCZ3dublRlbEJjc2NLMHorSkJaQzU0OXFOWThMbm9aMTBuRFptdW1YVDlac3dISCtJVkZacDYKMEZTY0k0V1V5U1gxVnJJT2tSandoSWlCSFk3YkhrZ01Bci9xeStuRmlFUUVRV2Q2VXAwOWtkS0hNVmdtVFp4KwprQXZRbFZ0Z3luYkFPWkNMeng0Ymo1Yi9MQklIejNiTk9zWlNtR3AxWi9hWDFkd1BaMlhOai83elovNGpuM095CkdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, + RSAForkEpoch: math.MaxUint64, // TODO: change epoch before forking mainnet } diff --git a/networkconfig/test-network.go b/networkconfig/test-network.go index a0e58f2819..628c24227d 100644 --- a/networkconfig/test-network.go +++ b/networkconfig/test-network.go @@ -18,4 +18,5 @@ var TestNetwork = NetworkConfig{ Bootnodes: []string{ "enr:-Li4QO86ZMZr_INMW_WQBsP2jS56yjrHnZXxAUOKJz4_qFPKD1Cr3rghQD2FtXPk2_VPnJUi8BBiMngOGVXC0wTYpJGGAYgqnGSNh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhArqAsGJc2VjcDI1NmsxoQKNW0Mf-xTXcevRSkZOvoN0Q0T9OkTjGZQyQeOl3bYU3YN0Y3CCE4iDdWRwgg-g;enr:-Li4QBoH15fXLV78y1_nmD5sODveptALORh568iWLS_eju3SUvF2ZfGE2j-nERKU1zb2g5KlS8L70SRLdRUJ-pHH-fmGAYgvh9oGh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhArqAsGJc2VjcDI1NmsxoQO_tV3JP75ZUZPjhOgc2VqEu_FQEMeHc4AyOz6Lz33M2IN0Y3CCE4mDdWRwgg-h", }, + RSAForkEpoch: 123456789, } diff --git a/operator/slotticker/slotticker_test.go b/operator/slotticker/slotticker_test.go index 612e61d492..044e945829 100644 --- a/operator/slotticker/slotticker_test.go +++ b/operator/slotticker/slotticker_test.go @@ -133,9 +133,9 @@ func TestMultipleSlotTickers(t *testing.T) { elapsed := time.Since(start) expectedDuration := slotDuration * ticksPerTimer - // We'll allow a small buffer for drift, say 1% - buffer := expectedDuration * 1 / 100 - assert.True(t, elapsed <= expectedDuration+buffer, "Expected all tickers to complete within %v but took %v", expectedDuration.String(), elapsed.String()) + // We'll allow a small buffer for drift, say 5% + buffer := expectedDuration * 5 / 100 + assert.True(t, elapsed <= expectedDuration+buffer, "Expected all tickers to complete within", expectedDuration.String(), "but took", elapsed.String()) } func TestSlotSkipping(t *testing.T) { diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 05182a9843..366b4dba0e 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -81,7 +81,6 @@ type ControllerOptions struct { Metrics validator.Metrics MessageValidator validation.MessageValidator ValidatorsMap *validatorsmap.ValidatorsMap - VerifySignatures bool // worker flags WorkersCount int `yaml:"MsgWorkersCount" env:"MSG_WORKERS_COUNT" env-default:"256" env-description:"Number of goroutines to use for message workers"` @@ -163,7 +162,7 @@ type controller struct { // NewController creates a new validator controller instance func NewController(logger *zap.Logger, options ControllerOptions) Controller { - logger.Debug("setting up validator controller", zap.Bool("message_validation_verify_signatures", options.VerifySignatures)) + logger.Debug("setting up validator controller") // lookup in a map that holds all relevant operators operatorsIDs := &sync.Map{} @@ -190,7 +189,6 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { GasLimit: options.GasLimit, MessageValidator: options.MessageValidator, Metrics: 
options.Metrics, - VerifySignatures: options.VerifySignatures, } // If full node, increase queue size to make enough room @@ -830,7 +828,7 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt Storage: options.Storage.Get(role), Network: options.Network, Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), - SignatureVerification: options.VerifySignatures, + SignatureVerification: true, } config.ValueCheckF = valueCheckF diff --git a/operator/validator/router.go b/operator/validator/router.go index e090cff3bc..1d43c73b16 100644 --- a/operator/validator/router.go +++ b/operator/validator/router.go @@ -5,7 +5,6 @@ import ( "go.uber.org/zap" - "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) @@ -15,14 +14,12 @@ func newMessageRouter(logger *zap.Logger) *messageRouter { return &messageRouter{ logger: logger, ch: make(chan *queue.DecodedSSVMessage, bufSize), - msgID: commons.MsgID(), } } type messageRouter struct { logger *zap.Logger ch chan *queue.DecodedSSVMessage - msgID commons.MsgIDFunc } func (r *messageRouter) Route(ctx context.Context, message *queue.DecodedSSVMessage) { diff --git a/protocol/v2/ssv/validator/non_committee_validator.go b/protocol/v2/ssv/validator/non_committee_validator.go index e1bcf47df7..c9d2dbb431 100644 --- a/protocol/v2/ssv/validator/non_committee_validator.go +++ b/protocol/v2/ssv/validator/non_committee_validator.go @@ -25,7 +25,7 @@ func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID Domain: types.GetDefaultDomain(), Storage: opts.Storage.Get(identifier.GetRoleType()), Network: opts.Network, - SignatureVerification: opts.VerifySignatures, + SignatureVerification: true, } ctrl := qbftcontroller.NewController(identifier[:], &opts.SSVShare.Share, types.GetDefaultDomain(), config, opts.FullNode) ctrl.StoredInstances = make(qbftcontroller.InstanceContainer, 0, nonCommitteeInstanceContainerCapacity(opts.FullNode)) diff --git a/protocol/v2/ssv/validator/opts.go b/protocol/v2/ssv/validator/opts.go index 9c2e0d81a7..8b32cfe0fa 100644 --- a/protocol/v2/ssv/validator/opts.go +++ b/protocol/v2/ssv/validator/opts.go @@ -34,7 +34,6 @@ type Options struct { GasLimit uint64 MessageValidator validation.MessageValidator Metrics Metrics - VerifySignatures bool } func (o *Options) defaults() { diff --git a/protocol/v2/types/signature_benchmark_linux_test.go b/protocol/v2/types/signature_benchmark_linux_test.go new file mode 100644 index 0000000000..d3a9b295e2 --- /dev/null +++ b/protocol/v2/types/signature_benchmark_linux_test.go @@ -0,0 +1,70 @@ +//go:build linux + +package types + +import ( + "crypto" + "crypto/sha256" + "fmt" + "testing" + + "github.com/microsoft/go-crypto-openssl/openssl" + "github.com/microsoft/go-crypto-openssl/openssl/bbig/bridge" +) + +func init() { + if err := openssl.Init(); err != nil { + panic(err) + } +} + +func BenchmarkVerifyPKCS1v15OpenSSL(b *testing.B) { + dataOpenSSL := []byte("This is test data for OpenSSL verification.") + hashedOpenSSL := sha256.Sum256(dataOpenSSL) + + priv, pub := newOpenSSLRSAKey(2048) + + sig, err := openssl.SignRSAPKCS1v15(priv, crypto.SHA256, hashedOpenSSL[:]) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := openssl.VerifyRSAPKCS1v15(pub, crypto.SHA256, hashedOpenSSL[:], sig) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSignPKCS1v15OpenSSL(b *testing.B) { + dataOpenSSL := []byte("This is test data for OpenSSL verification.") + hashedOpenSSL := 
sha256.Sum256(dataOpenSSL) + + priv, _ := newOpenSSLRSAKey(2048) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := openssl.SignRSAPKCS1v15(priv, crypto.SHA256, hashedOpenSSL[:]) + if err != nil { + b.Fatal(err) + } + } +} + +func newOpenSSLRSAKey(size int) (*openssl.PrivateKeyRSA, *openssl.PublicKeyRSA) { + N, E, D, P, Q, Dp, Dq, Qinv, err := bridge.GenerateKeyRSA(size) + if err != nil { + panic(fmt.Sprintf("GenerateKeyRSA(%d): %v", size, err)) + } + priv, err := bridge.NewPrivateKeyRSA(N, E, D, P, Q, Dp, Dq, Qinv) + if err != nil { + panic(fmt.Sprintf("NewPrivateKeyRSA(%d): %v", size, err)) + } + pub, err := bridge.NewPublicKeyRSA(N, E) + if err != nil { + panic(fmt.Sprintf("NewPublicKeyRSA(%d): %v", size, err)) + } + return priv, pub +} diff --git a/protocol/v2/types/signature_benchmark_test.go b/protocol/v2/types/signature_benchmark_test.go new file mode 100644 index 0000000000..074182e10a --- /dev/null +++ b/protocol/v2/types/signature_benchmark_test.go @@ -0,0 +1,180 @@ +package types + +import ( + "crypto" + "crypto/md5" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "testing" + + "github.com/herumi/bls-eth-go-binary/bls" +) + +var ( + privateKey *rsa.PrivateKey + publicKey *rsa.PublicKey + signature []byte + data = []byte("This is some test data for verification.") + hashed = sha256.Sum256(data) +) + +var ( + privateKeyPSS *rsa.PrivateKey + publicKeyPSS *rsa.PublicKey + pssSignature []byte + dataPSS = []byte("This is some test data for PSS verification.") + hashedPSS = sha256.Sum256(dataPSS) +) + +var ( + privateKeyFast *rsa.PrivateKey + publicKeyFast *rsa.PublicKey + signatureFast []byte + dataFast = []byte("This is test data for fast verification.") + hashedFast = md5.Sum(dataFast) +) + +func init() { + var err error + privateKey, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + publicKey = &privateKey.PublicKey + + signature, err = rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, hashed[:]) + if err != nil { + panic(err) + } + + if err := bls.Init(bls.BLS12_381); err != nil { + panic(err) + } + + if err := bls.SetETHmode(bls.EthModeLatest); err != nil { + panic(err) + } + + privateKeyPSS, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + publicKeyPSS = &privateKeyPSS.PublicKey + + pssOptions := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + } + + pssSignature, err = rsa.SignPSS(rand.Reader, privateKeyPSS, crypto.SHA256, hashedPSS[:], pssOptions) + if err != nil { + panic(err) + } + + privateKeyFast, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + publicKeyFast = &privateKeyFast.PublicKey + + signatureFast, err = rsa.SignPKCS1v15(rand.Reader, privateKeyFast, crypto.MD5, hashedFast[:]) + if err != nil { + panic(err) + } +} + +func BenchmarkVerifyBLS(b *testing.B) { + secKey := new(bls.SecretKey) + secKey.SetByCSPRNG() + pubKey := secKey.GetPublicKey() + msg := []byte("This is some test data for verification.") + sig := secKey.SignByte(msg) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !sig.VerifyByte(pubKey, msg) { + b.Fatal("Verification failed") + } + } +} + +func BenchmarkVerifyPKCS1v15(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, hashed[:], signature) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkVerifyPKCS1v15FastHash(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := rsa.VerifyPKCS1v15(publicKeyFast, crypto.MD5, hashedFast[:], 
signatureFast) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkVerifyPSS(b *testing.B) { + pssOptions := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := rsa.VerifyPSS(publicKeyPSS, crypto.SHA256, hashedPSS[:], pssSignature, pssOptions) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSignBLS(b *testing.B) { + secKey := new(bls.SecretKey) + secKey.SetByCSPRNG() + msg := []byte("This is some test data for verification.") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = secKey.SignByte(msg) + } +} + +func BenchmarkSignPKCS1v15(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, hashed[:]) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSignPKCS1v15FastHash(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := rsa.SignPKCS1v15(rand.Reader, privateKeyFast, crypto.MD5, hashedFast[:]) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSignPSS(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + pssOptions := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + } + + _, err := rsa.SignPSS(rand.Reader, privateKeyPSS, crypto.SHA256, hashedPSS[:], pssOptions) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/utils/keys.go b/utils/keys.go index a237b17258..4287d03e38 100644 --- a/utils/keys.go +++ b/utils/keys.go @@ -25,7 +25,7 @@ func ECDSAPrivateKey(logger *zap.Logger, privateKey string) (*ecdsa.PrivateKey, if err != nil { return nil, errors.WithMessage(err, "failed to unmarshal passed privKey") } - privKey, err = commons.ConvertFromInterfacePrivKey(unmarshalledKey) + privKey, err = commons.ECDSAPrivFromInterface(unmarshalledKey) if err != nil { return nil, err } @@ -35,12 +35,12 @@ func ECDSAPrivateKey(logger *zap.Logger, privateKey string) (*ecdsa.PrivateKey, if err != nil { return nil, errors.WithMessage(err, "failed to generate 256k1 key") } - privKey, err = commons.ConvertFromInterfacePrivKey(privInterfaceKey) + privKey, err = commons.ECDSAPrivFromInterface(privInterfaceKey) if err != nil { return nil, err } } - interfacePriv, err := commons.ConvertToInterfacePrivkey(privKey) + interfacePriv, err := commons.ECDSAPrivToInterface(privKey) if err != nil { return nil, err } From d32102b4dd21a88385a5ff1c19a8e4ff039064fe Mon Sep 17 00:00:00 2001 From: systemblox <40427708+systemblox@users.noreply.github.com> Date: Thu, 9 Nov 2023 17:51:59 +0200 Subject: [PATCH 40/54] Changed ports for exporter (#1201) Co-authored-by: stoyan.peev --- .k8/hetzner-stage/ssv-exporter-holesky.yml | 70 +++++++++++----------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/.k8/hetzner-stage/ssv-exporter-holesky.yml b/.k8/hetzner-stage/ssv-exporter-holesky.yml index 10fb398390..6f71edfb51 100644 --- a/.k8/hetzner-stage/ssv-exporter-holesky.yml +++ b/.k8/hetzner-stage/ssv-exporter-holesky.yml @@ -9,26 +9,26 @@ metadata: spec: type: ClusterIP ports: - - port: 12013 + - port: 12073 protocol: UDP - targetPort: 12013 - name: port-12013 - - port: 13013 + targetPort: 12073 + name: port-12073 + - port: 13073 protocol: TCP - targetPort: 13013 - name: port-13013 - - port: 14013 + targetPort: 13073 + name: port-13073 + - port: 14073 protocol: TCP - targetPort: 14013 - name: port-14013 - - port: 15013 + targetPort: 14073 + name: port-14073 + - port: 15073 protocol: TCP - targetPort: 15013 - name: port-15013 - - port: 16013 + targetPort: 15073 
+ name: port-15073 - port: 16073 protocol: TCP targetPort: 16073 name: port-16073 selector: app: ssv-exporter-holesky --- @@ -70,22 +70,22 @@ spec: memory: REPLACE_EXPORTER_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12013 - name: port-12013 - hostPort: 12013 + - containerPort: 12073 + name: port-12073 + hostPort: 12073 protocol: UDP - - containerPort: 13013 - name: port-13013 - hostPort: 13013 - - containerPort: 14013 - name: port-14013 - hostPort: 14013 - - containerPort: 15013 - name: port-15013 - hostPort: 15013 - - containerPort: 16013 - name: port-16013 - hostPort: 16013 + - containerPort: 13073 + name: port-13073 + hostPort: 13073 + - containerPort: 14073 + name: port-14073 + hostPort: 14073 + - containerPort: 15073 + name: port-15073 + hostPort: 15073 + - containerPort: 16073 + name: port-16073 + hostPort: 16073 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -113,17 +113,17 @@ spec: - name: DB_REPORTING value: "false" - name: METRICS_API_PORT - value: "15013" + value: "15073" - name: SSV_API_PORT - value: "16013" + value: "16073" - name: ENABLE_PROFILE value: "true" - name: UDP_PORT - value: "12013" + value: "12073" - name: TCP_PORT - value: "13003" + value: "13073" - name: WS_API_PORT - value: "14013" + value: "14073" - name: FULLNODE value: "true" - name: EXPORTER From aa4e88dcb920124a45470a2b6883c90a504b8b4d Mon Sep 17 00:00:00 2001 From: Taiga <125817027+zktaiga@users.noreply.github.com> Date: Wed, 15 Nov 2023 17:18:14 +0400 Subject: [PATCH 41/54] Fix dashboards and refactor docs (#1176) * Refactor Node Dashboard * Refactor Operator Performance Dashboard * Refactored Monitoring guide Added simpler Grafana config files * Fixed broken link and grammar suggestions * Remove single dashboards * Monitoring docs --------- Co-authored-by: Lior Rutenberg Co-authored-by: Massimo Luraschi --- monitoring/README.md | 95 +- monitoring/grafana/dashboard_ssv_node.json | 488 ++++--- .../dashboard_ssv_operator_performance.json | 1223 +++++++++-------- 3 files changed, 1037 insertions(+), 769 deletions(-) diff --git a/monitoring/README.md b/monitoring/README.md index 4e493bd81b..85150d46c6 100644 --- a/monitoring/README.md +++ b/monitoring/README.md @@ -6,64 +6,103 @@ # SSV - Monitoring -`/metrics` end-point is exposing metrics from ssv node to prometheus. +This page will outline how to monitor an SSV Node using Grafana and Prometheus. +### Pre-requisites +Make sure your node is exposing the `/metrics` and `/health` endpoints. This is done via node configuration, as explained in the [Installation guide on the docs](https://docs.ssv.network/run-a-node/operator-node/installation#create-configuration-file). This guide will not go into the details of setting up and running Prometheus or Grafana. For this, we recommend visiting their respective documentation: [Prometheus docs](https://prometheus.io/docs/introduction/overview/) [Grafana docs](https://grafana.com/docs/) For Grafana, specifically, [Grafana Cloud](https://grafana.com/docs/grafana-cloud/) is a viable solution, especially for beginners. See the configuration of a [local prometheus service](prometheus/prometheus.yaml). ### Health Check -Health check route is available on `GET /health`.
\ -In case the node is healthy it returns an HTTP Code `200` with empty response: +Even if Prometheus is not configured, the `/health` end-point can be polled by a simple HTTP client as a health check. \ +In case the node is healthy it returns an HTTP Code `200` with an empty response: ```shell $ curl http://localhost:15000/health ``` -If the node is not healthy, the corresponding errors will be returned with HTTP Code `500`: +If the node is not healthy, the corresponding errors will be returned with an HTTP Code of `500`: ```shell $ curl http://localhost:15000/health {"errors": ["could not sync eth1 events"]} ``` -## Metrics +## Prometheus -`MetricsAPIPort` is used to enable prometheus metrics collection: +In a typical setup, where only one SSV node Docker container is running, Prometheus should be configured with a file like this: -Example: ```yaml -MetricsAPIPort: 15000 +global: + scrape_interval: 10s + evaluation_interval: 10s + +scrape_configs: + - job_name: ssv + metrics_path: /metrics + static_configs: + - targets: + # change the targets according to your setup + # if running prometheus from source, or as executable: + # - <container-name>:15000 (i.e.: ssv_node:15000, check with docker ps command) + # if running prometheus as docker container: + - host.docker.internal:15000 + - job_name: ssv_health + metrics_path: /health + static_configs: + - targets: + # change the targets according to your setup + # if running prometheus from source, or as executable: + # - <container-name>:15000 (i.e.: ssv_node:15000, check with docker ps command) + # if running prometheus as docker container: + - host.docker.internal:15000 + ``` -Or as env variable: -```shell -METRICS_API_PORT=15000 +And to launch the Prometheus service as a Docker container as well ([using the official Docker image, as shown here](https://hub.docker.com/r/prom/prometheus)), use this command, where `/path/to/prometheus.yml` is the path and filename of the configuration file described above: + +```bash +docker run \ + -p 9090:9090 \ + -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \ + prom/prometheus ``` -## Grafana +> ⚠️ Note: If you are not running Prometheus as a Docker container, but as an executable, change the `targets` in the config file to reflect the correct networking connections. In the case where the SSV Node container is called `ssv_node` the targets should look like this: ```yaml + - targets: + - ssv_node:15000 ``` +> Use the `docker ps` command to verify the name of the SSV Node container. +## Grafana monitoring +After successfully configuring a Prometheus service and [adding it as a data source to Grafana](https://grafana.com/docs/grafana/latest/datasources/prometheus/configure-prometheus-data-source/) (read [here for Grafana Cloud](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/data-sources/prometheus/configure-prometheus-data-source/)), a Grafana dashboard can be created. +Below are two example dashboards, monitoring the SSV Node and the performance of an Operator, respectively: -In order to setup a grafana dashboard do the following: -1. Enable metrics (`MetricsAPIPort`) -2. Setup Prometheus as mentioned in the beginning of this document and add as data source - * Job name assumed to be '`ssv`' -3. Import dashboards to Grafana: - * [SSV Node dashboard](./grafana/NODE.md) - * [Operator Performance dashboard](./grafana/PERF.md) -4. Align dashboard variables: - * `instance` - container name, used in 'instance' field for metrics coming from prometheus.
\ In the given dashboard, instances names are: `ssv-node-v2-`, make sure to change according to your setup +* [SSV Node monitoring](grafana/dashboard_ssv_node.json) +* [Operator performance monitoring](grafana/dashboard_ssv_operator_performance.json) +The dashboards leverage Grafana templating so that one can select different data sources. The SSV operators are inferred from the Prometheus metrics, so if you spin up more SSV operators, they will show up on the dashboards seamlessly.
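As a complement to the dashboards, the `/health` end-point shown in the Health Check section above can also be polled from code. A minimal Go poller, as a sketch only; it assumes the default metrics port `15000` used in the examples above:

```go
// healthcheck.go - minimal poller for the SSV node /health end-point.
// Sketch only: assumes the node exposes its API on the default port 15000.
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}

	resp, err := client.Get("http://localhost:15000/health")
	if err != nil {
		fmt.Println("node unreachable:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode == http.StatusOK {
		fmt.Println("node is healthy")
		return
	}
	// On failure the node returns HTTP 500 with a JSON list of errors.
	fmt.Printf("node unhealthy (HTTP %d): %s\n", resp.StatusCode, body)
}
```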
+The dashboards leverage Grafana templating so that one can select different datasources, the Grafana SSV operators are inferred from the Prometheus metrics, so if you spin up more SSV operators, they will show up on the dashboard seamlessly. -### Profiling +--- +## Profiling -Profiling can be enabled via config: +Profiling can be enabled in the node configuration file (`config.yaml`): ```yaml EnableProfile: true ``` +> Note: remember to restart the node after changing its configuration All the default `pprof` routes are available via HTTP: ```shell diff --git a/monitoring/grafana/dashboard_ssv_node.json b/monitoring/grafana/dashboard_ssv_node.json index 47150acf91..8cd88a8d37 100644 --- a/monitoring/grafana/dashboard_ssv_node.json +++ b/monitoring/grafana/dashboard_ssv_node.json @@ -1,9 +1,57 @@ { + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "gauge", + "name": "Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.2.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph (old)", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], "annotations": { "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -21,12 +69,16 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 115, - "iteration": 1696933836051, + "id": null, "links": [], "liveNow": false, "panels": [ { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "gridPos": { "h": 1, "w": 24, @@ -34,13 +86,22 @@ "y": 0 }, "id": 20, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "refId": "A" + } + ], "title": "Node Health", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Current status of the beacon, ETH1 and SSV operator node", "fieldConfig": { @@ -149,6 +210,8 @@ }, "id": 42, "options": { + "minVizHeight": 75, + "minVizWidth": 75, "orientation": "auto", "reduceOptions": { "calcs": [ @@ -161,13 +224,14 @@ "showThresholdMarkers": true, "text": {} }, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, + "editorMode": "code", "exemplar": false, "expr": "ssv_beacon_status{instance=~\"$instance.*\"}", "instant": true, @@ -178,7 +242,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv_eth1_status{instance=~\"$instance.*\"}", @@ -191,7 +255,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "(ssv_node_status{instance=~\"$instance.*\"} + 1) or (absent(ssv_node_status{instance=~\"$instance.*\"}) * 0)", @@ -208,7 +272,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Health status of the beacon node", "fieldConfig": { @@ -217,6 +281,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": 
false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -228,6 +295,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -298,11 +366,13 @@ "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "bottom" + "displayMode": "list", + "placement": "bottom", + "showLegend": false }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.0.4", @@ -310,7 +380,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_beacon_status{instance=~\"$instance.*\"}", @@ -325,7 +395,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Health status of the ETH1 node", "fieldConfig": { @@ -334,6 +404,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -345,6 +418,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -415,11 +489,13 @@ "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "bottom" + "displayMode": "list", + "placement": "bottom", + "showLegend": false }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.0.4", @@ -427,7 +503,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_eth1_status{instance=~\"$instance.*\"}", @@ -442,7 +518,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Health status of the SSV operator node", "fieldConfig": { @@ -451,6 +527,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -462,6 +541,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -525,11 +605,13 @@ "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "bottom" + "displayMode": "list", + "placement": "bottom", + "showLegend": false }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.0.4", @@ -537,7 +619,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_node_status{instance=~\"$instance.*\"} + 1", @@ -550,7 +632,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "absent(ssv_node_status{instance=~\"$instance.*\"}) * 0", @@ -565,6 +647,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, "gridPos": { "h": 1, "w": 24, @@ -573,6 +659,15 @@ }, "id": 18, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], "title": "Resource Usage", "type": "row" }, @@ -583,7 +678,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "decimals": 2, "description": "RAM memory usage of the SSV operator node", @@ -622,7 +717,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", 
"pointradius": 5, "points": false, "renderer": "flot", @@ -634,7 +729,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (container_memory_working_set_bytes{image!=\"\", pod=~\"$instance.*\"}) by (pod)", @@ -685,7 +780,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Go memory usage of the SSV operator node", "fieldConfig": { @@ -694,6 +789,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -705,6 +803,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -751,14 +850,20 @@ "lastNotNull" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_memstats_sys_bytes{instance=~\"$instance.*\"}", "interval": "", @@ -766,6 +871,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_memstats_heap_idle_bytes{instance=~\"$instance.*\"}", "hide": false, @@ -774,6 +883,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_memstats_heap_inuse_bytes{instance=~\"$instance.*\"}", "hide": false, @@ -782,6 +895,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance.*\"}", "hide": false, @@ -800,7 +917,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Number of go routines running on the SSV operator node", "fieldConfig": { @@ -847,7 +964,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -857,6 +974,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_goroutines{instance=~\"$instance.*\"}", "format": "table", @@ -905,7 +1026,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "decimals": 3, "description": "CPU usage of the SSV operator node", @@ -951,7 +1072,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "pointradius": 5, "points": false, "renderer": "flot", @@ -961,6 +1082,10 @@ "steppedLine": true, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\", pod=~\"$instance.*\"}[1m])) by (pod)", "format": "time_series", @@ -1010,7 +1135,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Hard drive memory usage of the SSV operator node", "fieldConfig": { @@ -1019,6 +1144,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -1030,6 +1158,7 @@ "tooltip": false, "viz": false }, + 
"insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1076,14 +1205,20 @@ "last" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "((kubelet_volume_stats_used_bytes{persistentvolumeclaim=\"$instance\"} / kubelet_volume_stats_capacity_bytes{persistentvolumeclaim=\"$instance\"}) * 100)", "hide": true, @@ -1092,6 +1227,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "kubelet_volume_stats_used_bytes{persistentvolumeclaim=\"$instance\"}", "format": "time_series", @@ -1110,7 +1249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Network input and output usage of the SSV operator node", "fieldConfig": { @@ -1119,6 +1258,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -1130,6 +1272,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 2, "pointSize": 5, @@ -1180,10 +1323,12 @@ "max" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -1191,7 +1336,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate (container_network_receive_bytes_total{pod=~\"$instance.*\"}[1m])) by (pod)", @@ -1207,7 +1352,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate (container_network_transmit_bytes_total{pod=~\"$instance.*\"}[1m])) by (pod)", @@ -1226,6 +1371,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, "gridPos": { "h": 1, "w": 24, @@ -1234,13 +1383,22 @@ }, "id": 12, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], "title": "Network Discovery", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Number of connected peers to the SSV operator node", "fieldConfig": { @@ -1292,12 +1450,12 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_p2p_all_connected_peers{instance=~\"$instance.*\"}", @@ -1312,7 +1470,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Subnet peer distribution based on topic", "fieldConfig": { @@ -1321,6 +1479,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -1332,6 +1493,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1379,17 +1541,19 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": 
{ - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_p2p_connected_peers{instance=~\"$instance.*\"}", @@ -1404,7 +1568,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Subnet peers breakdown table of the SSV operator node", "fieldConfig": { @@ -1414,7 +1578,9 @@ }, "custom": { "align": "center", - "displayMode": "color-text", + "cellOptions": { + "type": "color-text" + }, "width": 150 }, "mappings": [], @@ -1424,8 +1590,7 @@ "mode": "absolute", "steps": [ { - "color": "#ccccdc", - "value": null + "color": "#ccccdc" } ] } @@ -1438,8 +1603,10 @@ }, "properties": [ { - "id": "custom.displayMode", - "value": "auto" + "id": "custom.cellOptions", + "value": { + "type": "auto" + } }, { "id": "unit", @@ -1463,8 +1630,7 @@ "mode": "absolute", "steps": [ { - "color": "#ccccdc", - "value": null + "color": "#ccccdc" }, { "color": "#ccccdc", @@ -1518,7 +1684,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:network:subnets:known{instance=~\"$instance.*\"}", @@ -1531,7 +1697,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:network:subnets:connected{instance=~\"$instance.*\"}", @@ -1545,7 +1711,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:network:subnets:my{instance=~\"$instance.*\"}", @@ -1617,7 +1783,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "The discovery rate of the subnet peers.\nFound is rate per second of nodes that were found with discovery. 
Rejected is rate per second of nodes that were found with discovery but rejected because of limit or subnet", "fieldConfig": { @@ -1658,8 +1824,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1681,7 +1846,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1691,7 +1857,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:network:discovery:found{instance=~\"$instance.*\"}[5m])", @@ -1702,7 +1868,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:network:discovery:rejected{instance=~\"$instance.*\"}[5m])", @@ -1718,7 +1884,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Table breakdown of all the SSV operator nodes", "fieldConfig": { @@ -1728,7 +1894,9 @@ }, "custom": { "align": "center", - "displayMode": "auto", + "cellOptions": { + "type": "auto" + }, "filterable": true }, "mappings": [], @@ -1736,8 +1904,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1898,7 +2065,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:network:peers_identity{instance=~\"$instance.*\"}[5m])", @@ -2035,6 +2202,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, "gridPos": { "h": 1, "w": 24, @@ -2043,13 +2214,22 @@ }, "id": 2, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], "title": "Network Messaging", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of inbound / outbound topic messages over the last 5 minutes", "fieldConfig": { @@ -2091,8 +2271,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "#EAB839", @@ -2114,7 +2293,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2125,7 +2305,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\"}[5m]))", @@ -2138,7 +2318,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\", msg_type=\"0\"}[5m]))", @@ -2152,7 +2332,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\", msg_type=\"1\"}[5m]))", @@ -2166,7 +2346,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\", msg_type=\"2\"}[5m]))", @@ -2180,7 +2360,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:out{instance=~\"$instance.*\"}[5m]))", @@ -2198,7 +2378,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": 
"${datasource}" }, "description": "Rate per second of incoming topic messages for every topic over the last 5 minutes", "fieldConfig": { @@ -2240,8 +2420,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2268,7 +2447,8 @@ "mean" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2278,7 +2458,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate (ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\"}[5m])", @@ -2293,7 +2473,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of outgoing topic messages for every topic over the last 5 minutes.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive are outgoing requests.", "fieldConfig": { @@ -2334,8 +2514,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2362,7 +2541,8 @@ "mean" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2372,7 +2552,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate (ssv:p2p:pubsub:msg:out{instance=~\"$instance.*\"}[5m])", @@ -2387,7 +2567,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Table breakdown for each stream protocol.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive Requests are outgoing.", "fieldConfig": { @@ -2397,15 +2577,16 @@ }, "custom": { "align": "auto", - "displayMode": "auto" + "cellOptions": { + "type": "auto" + } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2460,7 +2641,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:p2p:streams:res{instance=~\"$instance.*\"}", @@ -2473,7 +2654,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:p2p:streams:req:count{instance=~\"$instance.*\"}", @@ -2487,7 +2668,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:p2p:streams:req:success{instance=~\"$instance.*\"}", @@ -2501,7 +2682,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:p2p:streams:req:active{instance=~\"$instance.*\"}", @@ -2558,7 +2739,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of requests, responses and active requests.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive are outgoing requests.", "fieldConfig": { @@ -2599,8 +2780,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2618,7 +2798,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2629,7 +2810,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"rate(ssv:p2p:streams:res{instance=~\"$instance.*\"}[5m])", @@ -2642,7 +2823,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:p2p:streams:req:count{instance=~\"$instance.*\"}[5m])", @@ -2656,7 +2837,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:p2p:streams:req:success{instance=~\"$instance.*\"}[5m])", @@ -2670,7 +2851,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:p2p:streams:req:active{instance=~\"$instance.*\"}[5m])", @@ -2924,78 +3105,47 @@ } ], "refresh": "", - "schemaVersion": 34, - "style": "dark", + "schemaVersion": 38, "tags": [], "templating": { "list": [ { - "current": { - "selected": false, - "text": "ssv-node-v2-6", - "value": "ssv-node-v2-6" + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" }, + "definition": "label_values(ssv_beacon_status,instance)", "description": "", - "hide": 1, + "hide": 0, "includeAll": false, "multi": false, "name": "instance", - "options": [ - { - "selected": false, - "text": "ssv-node-v2-1", - "value": "ssv-node-v2-1" - }, - { - "selected": false, - "text": "ssv-node-v2-2", - "value": "ssv-node-v2-2" - }, - { - "selected": false, - "text": "ssv-node-v2-3", - "value": "ssv-node-v2-3" - }, - { - "selected": false, - "text": "ssv-node-v2-4", - "value": "ssv-node-v2-4" - }, - { - "selected": false, - "text": "ssv-node-v2-5", - "value": "ssv-node-v2-5" - }, - { - "selected": true, - "text": "ssv-node-v2-6", - "value": "ssv-node-v2-6" - }, - { - "selected": false, - "text": "ssv-node-v2-7", - "value": "ssv-node-v2-7" - }, - { - "selected": false, - "text": "ssv-node-v2-8", - "value": "ssv-node-v2-8" - }, - { - "selected": false, - "text": "ssv-exporter", - "value": "ssv-exporter" - }, - { - "selected": false, - "text": "ssv-exporter-v2", - "value": "ssv-exporter-v2" - } - ], - "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-exporter,ssv-exporter-v2", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(ssv_beacon_status,instance)", + "refId": "instance" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", "queryValue": "", + "refresh": 1, + "regex": "", "skipUrlSync": false, - "type": "custom" + "type": "datasource" } ] }, @@ -3006,7 +3156,7 @@ "timepicker": {}, "timezone": "", "title": "Node Dashboard", - "uid": "QNiMrdoVz", - "version": 70, + "uid": "node_dashboard", + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/monitoring/grafana/dashboard_ssv_operator_performance.json b/monitoring/grafana/dashboard_ssv_operator_performance.json index ce769ee03d..d7248108e7 100644 --- a/monitoring/grafana/dashboard_ssv_operator_performance.json +++ b/monitoring/grafana/dashboard_ssv_operator_performance.json @@ -1,9 +1,39 @@ { + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "bargauge", + "name": "Bar gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.2.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + 
"name": "Time series", + "version": "" + } + ], "annotations": { "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -21,13 +51,16 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 117, - "iteration": 1676024010436, + "id": null, "links": [], "liveNow": false, "panels": [ { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "gridPos": { "h": 1, "w": 24, @@ -36,13 +69,22 @@ }, "id": 8, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "refId": "A" + } + ], "title": "Operator Stats", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Total amount of validators and total active validators\n", "fieldConfig": { @@ -51,6 +93,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -62,6 +107,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -107,17 +153,19 @@ "last" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "count(ssv:validator:v2:status{instance=~\"$instance.*\"} == 3)", @@ -128,7 +176,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "count(ssv:validator:v2:status{instance=~\"$instance.*\"} != 9)", @@ -140,7 +188,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "count(ssv:validator:v2:status{instance=~\"$instance.*\"} == 6)", @@ -152,7 +200,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "count(ssv:validator:v2:status{instance=~\"$instance.*\"} == 9)", @@ -167,6 +215,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, "gridPos": { "h": 1, "w": 24, @@ -175,13 +227,22 @@ }, "id": 6, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, + "refId": "A" + } + ], "title": "Attester Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -211,6 +272,9 @@ "id": 48, "options": { "displayMode": "gradient", + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ @@ -219,14 +283,15 @@ "fields": "", "values": false }, - "showUnfilled": true + "showUnfilled": true, + "valueMode": "color" }, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 0)", @@ -239,7 +304,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": 
"${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 1)", @@ -252,7 +317,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 2)", @@ -265,7 +330,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 0)", @@ -282,7 +347,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -291,6 +356,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -302,6 +370,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -347,10 +416,12 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -358,7 +429,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"ATTESTER\"}[5m]))", @@ -373,7 +444,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -382,6 +453,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -393,6 +467,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -434,10 +509,12 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -445,7 +522,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"ATTESTER\"}[5m]))", @@ -460,7 +537,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Attestation full flow duration excluding waiting for 1/3 of slot time and attestation data request. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -469,6 +546,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -480,6 +560,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -526,10 +607,12 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -537,7 +620,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))", @@ -552,7 +635,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -564,7 +647,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -576,7 +659,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -588,7 +671,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -600,7 +683,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -612,7 +695,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -624,7 +707,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -640,7 +723,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Proposal stage duration for an attester role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -649,6 +732,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -660,6 +746,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -706,10 +793,12 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -717,7 +806,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))", @@ -732,7 +821,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -744,7 +833,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -756,7 +845,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": 
true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -768,7 +857,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -780,7 +869,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -792,7 +881,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"1.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -804,7 +893,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"2.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"1.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -816,7 +905,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"2.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -828,7 +917,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -844,7 
+933,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Prepare stage duration for an attester role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -853,6 +942,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -864,6 +956,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -910,10 +1003,12 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -921,7 +1016,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))", @@ -936,7 +1031,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -948,7 +1043,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -960,7 +1055,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -972,7 +1067,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -984,7 +1079,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -996,7 +1091,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"1.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -1008,7 +1103,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"2.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"1.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -1020,7 +1115,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"2.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -1032,7 +1127,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -1048,7 +1143,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Commit stage duration for an attester role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1057,6 +1152,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -1068,6 +1166,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1114,10 +1213,12 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -1125,7 +1226,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))", @@ -1140,7 +1241,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1152,7 +1253,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1164,7 +1265,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1176,7 +1277,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1188,7 +1289,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1200,7 +1301,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"1.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1212,7 +1313,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"2.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"1.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1224,7 +1325,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"2.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1236,7 +1337,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1252,7 +1353,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for an attester role (duration from proposal to commits) broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1295,8 +1396,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1318,7 +1418,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1329,7 +1430,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))", @@ -1344,7 +1445,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1356,7 +1457,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1368,7 +1469,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1380,7 +1481,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1392,7 +1493,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1404,7 +1505,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1416,7 +1517,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1432,7 +1533,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for an attester role (signature collection duration) broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1475,8 +1576,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": 
"green" } ] }, @@ -1498,7 +1598,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1509,7 +1610,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))", @@ -1524,7 +1625,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1536,7 +1637,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1548,7 +1649,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1560,7 +1661,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1572,7 +1673,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1584,7 +1685,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1596,7 +1697,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1612,7 +1713,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Attestation data request duration for an attester role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1655,8 +1756,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1678,7 +1778,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1689,7 +1790,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))", @@ -1704,7 +1805,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1716,7 +1817,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1728,7 +1829,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1740,7 +1841,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1752,7 +1853,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1764,7 +1865,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1776,7 +1877,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1792,7 +1893,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Attestation submission duration for an attester role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1835,8 +1936,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1858,7 +1958,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" } }, @@ -1869,7 +1970,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))", @@ -1884,7 +1985,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1896,7 +1997,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1908,7 +2009,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1920,7 +2021,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1932,7 +2033,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1944,7 +2045,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true,
"expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1956,7 +2057,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1971,6 +2072,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, "gridPos": { "h": 1, "w": 24, @@ -1979,13 +2084,22 @@ }, "id": 4, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, + "refId": "A" + } + ], "title": "Proposer Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -1998,8 +2112,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2030,7 +2143,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", @@ -2043,7 +2156,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 1)", @@ -2056,7 +2169,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 2)", @@ -2069,7 +2182,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", @@ -2086,7 +2199,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -2128,8 +2241,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2151,7 +2263,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2162,7 +2275,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"PROPOSER\"}[5m]))", @@ -2177,7 +2290,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -2219,8 +2332,7 @@ "mode": "absolute", "steps": [ 
{ - "color": "green", - "value": null + "color": "green" } ] } @@ -2238,7 +2350,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2249,7 +2362,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"PROPOSER\"}[5m]))", @@ -2264,7 +2377,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Duty full flow duration for a proposer role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -2307,8 +2420,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2330,7 +2442,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2341,7 +2454,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -2356,7 +2469,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2368,7 +2481,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2380,7 +2493,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2392,7 +2505,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2404,7 +2517,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2416,7 +2529,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2428,7 +2541,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2444,7 +2557,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Beacon block request duration for a proposer role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -2487,8 +2600,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2510,7 +2622,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2521,7 +2634,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))", @@ -2536,7 +2649,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2548,7 +2661,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2560,7 +2673,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, 
"exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2572,7 +2685,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2584,7 +2697,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2596,7 +2709,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2608,7 +2721,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2624,7 +2737,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Block submission duration for a proposer role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -2667,8 +2780,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2690,7 +2802,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" } }, @@ -2701,7 +2814,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -2716,7 +2829,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2728,7 +2841,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2740,7 +2853,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2752,7 +2865,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2764,7 +2877,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2776,7 +2889,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true,
"expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2788,7 +2901,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2804,7 +2917,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Pre-Consensus duration for a proposer role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -2847,8 +2960,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2870,7 +2982,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2881,7 +2994,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -2896,7 +3009,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2908,7 +3021,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2920,7 +3033,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2932,7 +3045,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2944,7 +3057,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2956,7 +3069,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2968,7 +3081,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2984,7 +3097,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for a proposer role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -3027,8 +3140,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -3050,7 +3162,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3061,7 +3174,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -3076,7 +3189,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3088,7 +3201,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3100,7 +3213,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3112,7 +3225,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3124,7 +3237,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3136,7 +3249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3148,7 +3261,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3164,7 +3277,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for a proposer role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -3207,8 +3320,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -3230,7 +3342,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3241,7 +3354,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -3256,7 +3369,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3268,7 +3381,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3280,7 +3393,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3292,7 +3405,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3304,7 +3417,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3316,7 +3429,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3328,7 +3441,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3343,6 +3456,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, "gridPos": { "h": 1, "w": 24, @@ -3351,13 +3468,22 @@ }, "id": 2, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, + "refId": "A" + } + ], "title": "Aggregator Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -3370,8 +3496,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -3402,7 +3527,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", @@ -3415,7 +3540,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 1)", @@ -3428,7 +3553,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 2)", @@ -3441,7 +3566,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", @@ -3458,7 +3583,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" 
}, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -3500,8 +3625,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3523,7 +3647,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3534,7 +3659,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"AGGREGATOR\"}[5m]))", @@ -3549,7 +3674,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -3591,8 +3716,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -3610,7 +3734,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3621,7 +3746,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"AGGREGATOR\"}[5m]))", @@ -3636,7 +3761,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Duty full flow duration for an aggregator role excluding waiting for 2/3 of slot time, attestation data and aggregate attestation requests. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -3679,8 +3804,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -3702,7 +3826,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3713,7 +3838,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -3728,7 +3853,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3740,7 +3865,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3752,7 +3877,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": 
"${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3764,7 +3889,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3776,7 +3901,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3788,7 +3913,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3800,7 +3925,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3816,7 +3941,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Aggregate attestation request duration for an aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -3859,8 +3984,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -3882,7 +4006,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3893,7 +4018,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))", @@ -3908,7 +4033,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3920,7 +4045,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3932,7 +4057,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3944,7 +4069,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3956,7 +4081,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3968,7 +4093,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", 
le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3980,7 +4105,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3996,7 +4121,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Proof submission duration for an aggregator role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -4039,8 +4164,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4062,7 +4186,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4073,7 +4198,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -4088,7 +4213,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4100,7 +4225,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4112,7 +4237,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4124,7 +4249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4136,7 +4261,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(sssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4148,7 +4273,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4160,7 +4285,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4176,7 +4301,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Pre-Consensus duration for an aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -4219,8 +4344,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4242,7 +4366,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4253,7 +4378,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -4268,7 +4393,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4280,7 +4405,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4292,7 +4417,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4304,7 +4429,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4316,7 +4441,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4328,7 +4453,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4340,7 +4465,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4356,7 +4481,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for an aggregator role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -4399,8 +4524,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4422,7 +4546,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4433,7 +4558,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -4448,7 +4573,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4460,7 +4585,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4472,7 +4597,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4484,7 +4609,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4496,7 +4621,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4508,7 +4633,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4520,7 +4645,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4536,7 +4661,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for an aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -4579,8 +4704,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4602,7 +4726,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4613,7 +4738,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -4628,7 +4753,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4640,7 +4765,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4652,7 +4777,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4664,7 +4789,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4676,7 +4801,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4688,7 +4813,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4700,7 +4825,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4715,6 +4840,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, "gridPos": { "h": 1, "w": 24, @@ -4723,13 +4852,22 @@ }, "id": 67, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, + "refId": "A" + } + ], "title": "Sync Committee Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -4742,8 +4880,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -4774,7 +4911,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", @@ -4787,7 +4924,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 1)", @@ -4800,7 +4937,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 2)", @@ -4813,7 +4950,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", @@ -4830,7 +4967,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -4872,8 +5009,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4895,7 +5031,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4906,7 +5043,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"SYNC_COMMITTEE\"}[5m]))", @@ -4921,7 +5058,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -4963,8 +5100,7 @@ "mode": 
"absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -4982,7 +5118,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4993,7 +5130,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"SYNC_COMMITTEE\"}[5m]))", @@ -5008,7 +5145,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Duty full flow duration for a sync committee role excluding waiting for 1/3 of slot time and beacon block root request.. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5051,8 +5188,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5074,7 +5210,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5085,7 +5222,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))", @@ -5100,7 +5237,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5112,7 +5249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5124,7 +5261,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5136,7 +5273,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5148,7 +5285,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5160,7 +5297,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5172,7 +5309,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5188,7 +5325,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Beacon block root request duration for a sync committee role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5231,8 +5368,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5254,7 +5390,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5265,7 +5402,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))", @@ -5280,7 +5417,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5292,7 +5429,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5304,7 +5441,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5316,7 +5453,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5328,7 +5465,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5340,7 +5477,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5352,7 +5489,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5368,7 +5505,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Sync Message submission duration for a sync committee role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5411,8 +5548,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5434,7 +5570,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5445,7 +5582,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))", @@ -5460,7 +5597,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5472,7 +5609,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5484,7 +5621,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5496,7 +5633,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5508,7 +5645,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(sssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5520,7 +5657,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5532,7 +5669,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5548,7 +5685,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for a sync committee role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5591,8 +5728,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5614,7 +5750,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5625,7 +5762,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))", @@ -5640,7 +5777,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5652,7 +5789,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5664,7 +5801,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5676,7 +5813,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5688,7 +5825,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5700,7 +5837,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5712,7 +5849,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5728,7 +5865,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for a sync committee role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5771,8 +5908,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5794,7 +5930,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5805,7 +5942,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))", @@ -5820,7 +5957,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5832,7 +5969,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5844,7 +5981,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5856,7 +5993,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5868,7 +6005,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5880,7 +6017,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5892,7 +6029,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5907,6 +6044,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, "gridPos": { "h": 1, "w": 24, @@ -5915,13 +6056,22 @@ }, "id": 69, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, + "refId": "A" + } + ], "title": "Sync Committee Aggregator Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -5934,8 +6084,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -5966,7 +6115,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", @@ -5979,7 +6128,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 1)", @@ -5992,7 +6141,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 2)", @@ -6005,7 +6154,7 
@@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", @@ -6022,7 +6171,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -6064,8 +6213,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6087,7 +6235,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6098,7 +6247,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"SYNC_COMMITTEE_CONTRIBUTION\"}[5m]))", @@ -6113,7 +6262,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -6155,8 +6304,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -6174,7 +6322,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6185,7 +6334,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"SYNC_COMMITTEE_CONTRIBUTION\"}[5m]))", @@ -6200,7 +6349,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Duty full flow duration for a sync committee aggregator role excluding waiting for slot time, beacon block root and sync committee contribution requests. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6243,8 +6392,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6266,7 +6414,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6277,7 +6426,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -6292,7 +6441,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6304,7 +6453,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6316,7 +6465,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6328,7 +6477,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6340,7 +6489,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6352,7 +6501,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6364,7 +6513,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6380,7 +6529,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Sync committee contribution request duration for a sync committee aggregator role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6423,8 +6572,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6446,7 +6594,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6457,7 +6606,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))", @@ -6472,7 +6621,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6484,7 +6633,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6496,7 +6645,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, 
"expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6508,7 +6657,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6520,7 +6669,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6532,7 +6681,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6544,7 +6693,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6560,7 +6709,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Signed contribution and proof submission duration for a sync committee aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6603,8 +6752,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6626,7 +6774,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6637,7 +6786,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -6652,7 +6801,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6664,7 +6813,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6676,7 +6825,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6688,7 +6837,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6700,7 +6849,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(sssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6712,7 +6861,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6724,7 +6873,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6740,7 +6889,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Pre-Consensus duration for a sync committee aggregator role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6783,8 +6932,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6806,7 +6954,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6817,7 +6966,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -6832,7 +6981,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6844,7 +6993,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6856,7 +7005,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6868,7 +7017,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6880,7 +7029,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6892,7 +7041,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6904,7 +7053,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6920,7 +7069,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for a sync committee aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6963,8 +7112,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6986,7 +7134,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6997,7 +7146,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -7012,7 +7161,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7024,7 +7173,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7036,7 +7185,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7048,7 +7197,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7060,7 +7209,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7072,7 +7221,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7084,7 +7233,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7100,7 +7249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for a sync committee aggregator role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -7143,8 +7292,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -7166,7 +7314,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -7177,7 +7326,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -7192,7 +7341,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7204,7 +7353,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7216,7 +7365,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7228,7 +7377,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7240,7 +7389,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7252,7 +7401,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7264,7 +7413,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7278,118 +7427,48 @@ "type": "timeseries" } ], - "refresh": false, - "schemaVersion": 34, - "style": "dark", + "refresh": "", + "schemaVersion": 38, "tags": [], "templating": { "list": [ { - "current": { - "selected": false, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" - }, - "hide": 1, + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(ssv_beacon_status,instance)", + "description": "", + "hide": 0, "includeAll": false, "multi": false, "name": "instance", - "options": [ - { - "selected": false, - "text": "ssv-node-v2-1", - "value": "ssv-node-v2-1" - }, - { - "selected": false, - "text": "ssv-node-v2-2", - "value": "ssv-node-v2-2" - }, - { - "selected": false, - "text": "ssv-node-v2-3", - "value": 
"ssv-node-v2-3" - }, - { - "selected": false, - "text": "ssv-node-v2-4", - "value": "ssv-node-v2-4" - }, - { - "selected": false, - "text": "ssv-node-v2-5", - "value": "ssv-node-v2-5" - }, - { - "selected": false, - "text": "ssv-node-v2-6", - "value": "ssv-node-v2-6" - }, - { - "selected": false, - "text": "ssv-node-v2-7", - "value": "ssv-node-v2-7" - }, - { - "selected": false, - "text": "ssv-node-v2-8", - "value": "ssv-node-v2-8" - }, - { - "selected": false, - "text": "ssv-node-9", - "value": "ssv-node-9" - }, - { - "selected": false, - "text": "ssv-node-10", - "value": "ssv-node-10" - }, - { - "selected": false, - "text": "ssv-node-11", - "value": "ssv-node-11" - }, - { - "selected": false, - "text": "ssv-node-12", - "value": "ssv-node-12" - }, - { - "selected": false, - "text": "ssv-exporter", - "value": "ssv-exporter" - }, - { - "selected": false, - "text": "ssv-exporter-v2", - "value": "ssv-exporter-v2" - }, - { - "selected": true, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" - }, - { - "selected": false, - "text": "ssv-node-v3-2", - "value": "ssv-node-v3-2" - }, - { - "selected": false, - "text": "ssv-node-v3-3", - "value": "ssv-node-v3-3" - }, - { - "selected": false, - "text": "ssv-node-v3-4", - "value": "ssv-node-v3-4" - } - ], - "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-node-9,ssv-node-10,ssv-node-11,ssv-node-12,ssv-exporter,ssv-exporter-v2,ssv-node-v3-1,ssv-node-v3-2,ssv-node-v3-3,ssv-node-v3-4", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(ssv_beacon_status,instance)", + "refId": "instance" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", "queryValue": "", + "refresh": 1, + "regex": "", "skipUrlSync": false, - "type": "custom" + "type": "datasource" } ] }, @@ -7400,7 +7479,7 @@ "timepicker": {}, "timezone": "", "title": "Operator Performance Dashboard", - "uid": "w-fXrOo4k", - "version": 95, + "uid": "operator_performance", + "version": 5, "weekStart": "" } \ No newline at end of file From 2409736e1e1b22e1c6e882821075eb38cc076dde Mon Sep 17 00:00:00 2001 From: rehs0y Date: Thu, 16 Nov 2023 15:31:26 +0200 Subject: [PATCH 42/54] fork to permissoinless handshake and message verification (#1204) * move permissioned epoch to network config * set test epoch for stage * set fork in the future * set forking times for all networks * fix left over tests * change holesky epoch * rename fork epoch property * use only one fork property for two of the forks, named PermissionlessActivationEpoch * fix test with correct if condition --- cli/operator/node.go | 5 ++--- message/validation/validation.go | 2 +- message/validation/validation_test.go | 2 +- network/p2p/config.go | 5 ----- network/p2p/metrics.go | 1 - network/p2p/p2p_pubsub.go | 2 +- network/p2p/p2p_setup.go | 3 +-- network/p2p/p2p_sync.go | 2 +- network/topics/msg_id.go | 2 +- networkconfig/config.go | 18 +++++++++--------- networkconfig/holesky-stage.go | 4 ++-- networkconfig/holesky.go | 3 ++- networkconfig/jato-v2.go | 2 +- networkconfig/local-testnet.go | 3 --- networkconfig/mainnet.go | 3 +-- networkconfig/test-network.go | 2 +- 16 files changed, 24 insertions(+), 35 deletions(-) diff --git a/cli/operator/node.go b/cli/operator/node.go index a6bca602c3..5d661179e6 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ 
-130,8 +130,8 @@ var StartNodeCmd = &cobra.Command{ cfg.P2pNetworkConfig.Ctx = cmd.Context() permissioned := func() bool { - currentEpoch := uint64(networkConfig.Beacon.EstimatedCurrentEpoch()) - return currentEpoch >= cfg.P2pNetworkConfig.PermissionedActivateEpoch && currentEpoch < cfg.P2pNetworkConfig.PermissionedDeactivateEpoch + currentEpoch := networkConfig.Beacon.EstimatedCurrentEpoch() + return currentEpoch < networkConfig.PermissionlessActivationEpoch } slotTickerProvider := func() slotticker.SlotTicker { @@ -161,7 +161,6 @@ var StartNodeCmd = &cobra.Command{ } cfg.P2pNetworkConfig.Permissioned = permissioned - cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) cfg.P2pNetworkConfig.NodeStorage = nodeStorage cfg.P2pNetworkConfig.OperatorPubKeyHash = format.OperatorID(operatorData.PublicKey) cfg.P2pNetworkConfig.OperatorID = operatorData.ID diff --git a/message/validation/validation.go b/message/validation/validation.go index 4fc6b1cd76..d981e24e68 100644 --- a/message/validation/validation.go +++ b/message/validation/validation.go @@ -277,7 +277,7 @@ func (mv *messageValidator) validateP2PMessage(pMsg *pubsub.Message, receivedAt var signatureVerifier func() error currentEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())) - if currentEpoch > mv.netCfg.RSAForkEpoch { + if currentEpoch > mv.netCfg.PermissionlessActivationEpoch { decMessageData, operatorID, signature, err := commons.DecodeSignedSSVMessage(messageData) messageData = decMessageData if err != nil { diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go index f7afaaa053..78c759556a 100644 --- a/message/validation/validation_test.go +++ b/message/validation/validation_test.go @@ -1734,7 +1734,7 @@ func Test_ValidateSSVMessage(t *testing.T) { // Get error when receiving an SSV message with an invalid signature. 
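// A minimal, self-contained sketch of the sign/verify round trip these tests
// exercise, assuming plain crypto/rsa keys; the repo's actual envelope codec
// (commons.EncodeSignedSSVMessage / DecodeSignedSSVMessage) is not reproduced
// here. Once the current epoch passes PermissionlessActivationEpoch, every
// broadcast message carries an RSA PKCS#1 v1.5 signature over the SHA-256 of
// the encoded message, and validation recomputes the hash and checks it.
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

// signEncodedMessage mirrors the Broadcast path: hash the encoded message and
// sign the digest with the operator's private key.
func signEncodedMessage(priv *rsa.PrivateKey, encodedMsg []byte) ([]byte, error) {
	hash := sha256.Sum256(encodedMsg)
	return rsa.SignPKCS1v15(nil, priv, crypto.SHA256, hash[:])
}

// verifyEncodedMessage mirrors the post-fork validation path.
func verifyEncodedMessage(pub *rsa.PublicKey, encodedMsg, sig []byte) error {
	hash := sha256.Sum256(encodedMsg)
	return rsa.VerifyPKCS1v15(pub, crypto.SHA256, hash[:], sig)
}

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	msg := []byte("encoded SSV message bytes")
	sig, err := signEncodedMessage(priv, msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(verifyEncodedMessage(&priv.PublicKey, msg, sig)) // prints <nil>
}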
t.Run("signature verification", func(t *testing.T) { - var afterFork = netCfg.RSAForkEpoch + 1000 + var afterFork = netCfg.PermissionlessActivationEpoch + 1000 t.Run("unsigned message before fork", func(t *testing.T) { validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) diff --git a/network/p2p/config.go b/network/p2p/config.go index f30a44ae24..70829f200e 100644 --- a/network/p2p/config.go +++ b/network/p2p/config.go @@ -86,12 +86,7 @@ type Config struct { GetValidatorStats network.GetValidatorStats - PermissionedActivateEpoch uint64 `yaml:"PermissionedActivateEpoch" env:"PERMISSIONED_ACTIVE_EPOCH" env-default:"0" env-description:"On which epoch to start only accepting peers that are operators registered in the contract"` - PermissionedDeactivateEpoch uint64 `yaml:"PermissionedDeactivateEpoch" env:"PERMISSIONED_DEACTIVE_EPOCH" env-default:"99999999999999" env-description:"On which epoch to start accepting operators all peers"` - Permissioned func() bool // this is not loaded from config file but set up in full node setup - // WhitelistedOperatorKeys is an array of Operator Public Key PEMs not registered in the contract with which the node will accept connections - WhitelistedOperatorKeys []string `yaml:"WhitelistedOperatorKeys" env:"WHITELISTED_KEYS" env-description:"Operators' keys not registered in the contract with which the node will accept connections"` } // Libp2pOptions creates options list for the libp2p host diff --git a/network/p2p/metrics.go b/network/p2p/metrics.go index 10ba41304e..8073ce1cb6 100644 --- a/network/p2p/metrics.go +++ b/network/p2p/metrics.go @@ -96,7 +96,6 @@ func (n *p2pNetwork) reportPeerIdentity(logger *zap.Logger, pid peer.ID) { ni := n.idx.NodeInfo(pid) if ni != nil { if ni.Metadata != nil { - opPKHash = ni.Metadata.OperatorID nodeVersion = ni.Metadata.NodeVersion } nodeType = "operator" diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go index 233a4189d1..dc28fdffab 100644 --- a/network/p2p/p2p_pubsub.go +++ b/network/p2p/p2p_pubsub.go @@ -62,7 +62,7 @@ func (n *p2pNetwork) Broadcast(msg *spectypes.SSVMessage) error { return errors.Wrap(err, "could not decode msg") } - if n.cfg.Network.Beacon.EstimatedCurrentEpoch() > n.cfg.Network.RSAForkEpoch { + if n.cfg.Network.Beacon.EstimatedCurrentEpoch() > n.cfg.Network.PermissionlessActivationEpoch { hash := sha256.Sum256(encodedMsg) signature, err := rsa.SignPKCS1v15(nil, n.operatorPrivateKey, crypto.SHA256, hash[:]) diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index c4c3e546d0..e3aaaa9a71 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -169,7 +169,6 @@ func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { domain := "0x" + hex.EncodeToString(n.cfg.Network.Domain[:]) self := records.NewNodeInfo(domain) self.Metadata = &records.NodeMetadata{ - OperatorID: n.cfg.OperatorPubKeyHash, NodeVersion: commons.GetNodeVersion(), Subnets: records.Subnets(n.subnets).String(), } @@ -204,7 +203,7 @@ func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { filters = append(filters, connections.SenderRecipientIPsCheckFilter(n.host.ID()), connections.SignatureCheckFilter(), - connections.RegisteredOperatorsFilter(n.nodeStorage, n.cfg.WhitelistedOperatorKeys)) + connections.RegisteredOperatorsFilter(n.nodeStorage, n.cfg.Network.WhitelistedOperatorKeys)) } return filters } diff --git a/network/p2p/p2p_sync.go b/network/p2p/p2p_sync.go index 1dd4fbd5ec..73c4b443b3 100644 --- a/network/p2p/p2p_sync.go +++ 
b/network/p2p/p2p_sync.go @@ -146,7 +146,7 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp continue } - if n.cfg.Network.Beacon.EstimatedCurrentEpoch() > n.cfg.Network.RSAForkEpoch { + if n.cfg.Network.Beacon.EstimatedCurrentEpoch() > n.cfg.Network.PermissionlessActivationEpoch { decodedMsg, _, _, err := commons.DecodeSignedSSVMessage(raw) if err != nil { logger.Debug("could not decode signed SSV message", zap.Error(err)) diff --git a/network/topics/msg_id.go b/network/topics/msg_id.go index b5d1efb777..e7cff81b2e 100644 --- a/network/topics/msg_id.go +++ b/network/topics/msg_id.go @@ -126,7 +126,7 @@ func (handler *msgIDHandler) MsgID(logger *zap.Logger) func(pmsg *ps_pb.Message) func (handler *msgIDHandler) pubsubMsgToMsgID(msg []byte) string { currentEpoch := handler.networkConfig.Beacon.EstimatedCurrentEpoch() - if currentEpoch > handler.networkConfig.RSAForkEpoch { + if currentEpoch > handler.networkConfig.PermissionlessActivationEpoch { decodedMsg, _, _, err := commons.DecodeSignedSSVMessage(msg) if err != nil { // todo: should err here or just log and let the decode function err? diff --git a/networkconfig/config.go b/networkconfig/config.go index 3499c04983..a69c40ad4d 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -30,15 +30,15 @@ func GetNetworkConfigByName(name string) (NetworkConfig, error) { } type NetworkConfig struct { - Name string - Beacon beacon.BeaconNetwork - Domain spectypes.DomainType - GenesisEpoch spec.Epoch - RegistrySyncOffset *big.Int - RegistryContractAddr string // TODO: ethcommon.Address - Bootnodes []string - WhitelistedOperatorKeys []string - RSAForkEpoch spec.Epoch + Name string + Beacon beacon.BeaconNetwork + Domain spectypes.DomainType + GenesisEpoch spec.Epoch + RegistrySyncOffset *big.Int + RegistryContractAddr string // TODO: ethcommon.Address + Bootnodes []string + WhitelistedOperatorKeys []string + PermissionlessActivationEpoch spec.Epoch } func (n NetworkConfig) String() string { diff --git a/networkconfig/holesky-stage.go b/networkconfig/holesky-stage.go index bbecaef474..ed87c45179 100644 --- a/networkconfig/holesky-stage.go +++ b/networkconfig/holesky-stage.go @@ -18,6 +18,6 @@ var HoleskyStage = NetworkConfig{ Bootnodes: []string{ "enr:-Li4QNUN0RdeoHjI4Np18-PX1VXrJ2rJMo2OarRz0wCAxiYlD3s_E4zsmXi1LHv62ULLBT-AQfZIjYefEoEsMDkaEKCGAYtCguORh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhKfrtCyJc2VjcDI1NmsxoQP2e508AoA0B-KH-IaAd3nVCfI9q16lNztV-oTpcH72tIN0Y3CCE4mDdWRwgg-h", }, - WhitelistedOperatorKeys: []string{}, - RSAForkEpoch: 0, + WhitelistedOperatorKeys: []string{}, + PermissionlessActivationEpoch: 10560, } diff --git a/networkconfig/holesky.go b/networkconfig/holesky.go index ab5ded8293..4b1f36a9a7 100644 --- a/networkconfig/holesky.go +++ b/networkconfig/holesky.go @@ -18,5 +18,6 @@ var Holesky = NetworkConfig{ Bootnodes: []string{ "enr:-Li4QFIQzamdvTxGJhvcXG_DFmCeyggSffDnllY5DiU47pd_K_1MRnSaJimWtfKJ-MD46jUX9TwgW5Jqe0t4pH41RYWGAYuFnlyth2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQN4v-N9zFYwEqzGPBBX37q24QPFvAVUtokIo1fblIsmTIN0Y3CCE4uDdWRwgg-j", }, - WhitelistedOperatorKeys: []string{}, + WhitelistedOperatorKeys: []string{}, + PermissionlessActivationEpoch: 13500, // Nov-27-2023 12:00:00 PM UTC } diff --git a/networkconfig/jato-v2.go b/networkconfig/jato-v2.go index 3e0cfe44c3..051826a858 100644 --- a/networkconfig/jato-v2.go +++ b/networkconfig/jato-v2.go @@ -33,5 +33,5 @@ var JatoV2 = NetworkConfig{ 
"LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNmkwelNHRzFiaHlPZU8xVDVxc2UKOFpHbElBQ2pmemVYQzhpYVVReGVCb0dlVGRvN0tqalkwNy80b3hBNkhjdG45bEtxd1BodG5ISXIvZ1RlWXNYUwp5QVhPL1Q5K2RQcng1ZEp3SEVCdm5BcmNSQkNzaGF5Sng2S0xiZ3RJb2dGSWhkK1ptaFpiWFpWZVp5THhzK2tZCnM4djVwcHBIbWNwWHRwUVAxWm1ycndpTC9hZU5JNzczbUlrZ1pBOGdNK2Z5S2RtTGJrQXdXZWh1SXZKRmpuVCsKQlVkUHUzWGJIemU2SlJnY2NYNmZnM1gwOTJibG9VMzRxY1VIelNhWU9TZlc2TUpEbFgzQzJCeFhCZ042VFV0aQpDN2k2ZE9qaW14RzlSMkp4ZHVhZGpUeEM1MHl5OE9IVWpMVGNkc2pWRjdYNXdGUzFqaDI5aFpDY0FoeDB2NDg3CjdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNldITnNBdTdSYnMxM0I2c0taWXgKVnZuMldlTy9YMTdSeUx1MjA0K2VtbjkvSGhIRlhXT29CMGczekNZQWp2WWdsbFJka0laTWt3ZkFUNGZvVjVTKwpvNzFFQ1dFN1ZuaytxcWd0U3k5M0ZTTVJzUG9vNngrTUd4ZURBQ3RQbDdQV1EyTXJmV1hkNzVwV1p5TVd5VndHCktPbFo0RHhoQ0VOcXlRcndlOTkybU9wVDZBcTJ1TmVsUmdESUJDSW1CV01NcUl2aXdhSU96MlBmTWR1L3ZVTWgKcVFuNGJJZjFpcVk2WGlKU1g2bDJvUWlTb09VMjRvNkFCdHlHbzRpTDJXN2tOajVUa1hOOEVzeGc3WmUveVQ0YgpKNGtvVjdmNUE3dmpMbHc1ZkdjWDR1bTBNK1QwbnczUlVIY3pHK1E3U1VGMTFGU3c0VnM1WVBHWC84a2tzdXgyCkx3SURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, - RSAForkEpoch: 0, // todo: decide forking epoch + PermissionlessActivationEpoch: 220257, // Nov-27-2023 12:04:48 PM UTC } diff --git a/networkconfig/local-testnet.go b/networkconfig/local-testnet.go index f96cbbaeea..09e829453f 100644 --- a/networkconfig/local-testnet.go +++ b/networkconfig/local-testnet.go @@ -1,8 +1,6 @@ package networkconfig import ( - "math" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" ) @@ -16,5 +14,4 @@ var LocalTestnet = NetworkConfig{ Bootnodes: []string{ "enr:-Li4QLR4Y1VbwiqFYKy6m-WFHRNDjhMDZ_qJwIABu2PY9BHjIYwCKpTvvkVmZhu43Q6zVA29sEUhtz10rQjDJkK3Hd-GAYiGrW2Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQJTcI7GHPw-ZqIflPZYYDK_guurp_gsAFF5Erns3-PAvIN0Y3CCE4mDdWRwgg-h", }, - RSAForkEpoch: math.MaxUint64, } diff --git a/networkconfig/mainnet.go b/networkconfig/mainnet.go index 1412f54ae3..3b22699a64 100644 --- a/networkconfig/mainnet.go +++ b/networkconfig/mainnet.go @@ -1,7 +1,6 @@ package networkconfig import ( - "math" "math/big" spectypes "github.com/bloxapp/ssv-spec/types" @@ -41,5 +40,5 @@ var Mainnet = NetworkConfig{ "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcU5Sd0xWSHNWMEloUjdjdUJkb1AKZnVwNTkydEJFSG0vRllDREZQbERMR2NVZ2NzZ29PdHBsV2hMRjBGSzIwQ3ppVi83WVZzcWpxcDh3VDExM3pBbQoxOTZZRlN6WmUzTFhOQXFRWlBwbDlpOVJxdVJJMGlBT2xiWUp0ampJRjd2ZVZLbVdybzMwWTZDV3JPcHpVQ1BPClRGVEpGZ0hvZmtQT2pabmprNURtdDg2ZURveUxzenJQZWQ0LzlyR2NNVUp4WnJBSjEvbFR1ajNaWWVJUk0wS04KUVQ0eitPb3p0T0dBeDVVcUk2THpQL3NGOWRJM3BzM3BIb3dXOWF2RHp3Qm94Y3hWam14NWhRMXowOTN4MnlkYgpWcjgxNDgzTzdqUkt6eFpXeEduOFJzZUROZkxwSi93VFJiQ0lVOFhwUC9IKzd6TWNGMG1HbVlUcjAvcWR1bVNsCjNRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", 
"LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdmRWVVJ0OFgxbFA5VDVSUUdYdVkKcFpZWjVBb3VuSEdUakMvQ1FoTmQ5RC9kT2kvSDUwVW1PdVBpTzhYYUF4UFRGcGIrZ2xCeGJRRHVQUGN1cENPdQpKN09lVTBvdzdsQjVMclZlWWt3RExnSHY3bDQwcjRWVTM3NlFueGhuS0JyVHNkaWdmZHJYUWZveGRhajVQQ0VYCnFjK1ozNXFPUmpCZ3dublRlbEJjc2NLMHorSkJaQzU0OXFOWThMbm9aMTBuRFptdW1YVDlac3dISCtJVkZacDYKMEZTY0k0V1V5U1gxVnJJT2tSandoSWlCSFk3YkhrZ01Bci9xeStuRmlFUUVRV2Q2VXAwOWtkS0hNVmdtVFp4KwprQXZRbFZ0Z3luYkFPWkNMeng0Ymo1Yi9MQklIejNiTk9zWlNtR3AxWi9hWDFkd1BaMlhOai83elovNGpuM095CkdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, - RSAForkEpoch: math.MaxUint64, // TODO: change epoch before forking mainnet + PermissionlessActivationEpoch: 248625, // Dec-11-2023 12:00:23 PM UTC } diff --git a/networkconfig/test-network.go b/networkconfig/test-network.go index 628c24227d..0b8d4ca067 100644 --- a/networkconfig/test-network.go +++ b/networkconfig/test-network.go @@ -18,5 +18,5 @@ var TestNetwork = NetworkConfig{ Bootnodes: []string{ "enr:-Li4QO86ZMZr_INMW_WQBsP2jS56yjrHnZXxAUOKJz4_qFPKD1Cr3rghQD2FtXPk2_VPnJUi8BBiMngOGVXC0wTYpJGGAYgqnGSNh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhArqAsGJc2VjcDI1NmsxoQKNW0Mf-xTXcevRSkZOvoN0Q0T9OkTjGZQyQeOl3bYU3YN0Y3CCE4iDdWRwgg-g;enr:-Li4QBoH15fXLV78y1_nmD5sODveptALORh568iWLS_eju3SUvF2ZfGE2j-nERKU1zb2g5KlS8L70SRLdRUJ-pHH-fmGAYgvh9oGh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhArqAsGJc2VjcDI1NmsxoQO_tV3JP75ZUZPjhOgc2VqEu_FQEMeHc4AyOz6Lz33M2IN0Y3CCE4mDdWRwgg-h", }, - RSAForkEpoch: 123456789, + PermissionlessActivationEpoch: 123456789, } From 8116217d8de309309a2ce34423c3e41bc0aada0c Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 16 Nov 2023 14:43:43 +0100 Subject: [PATCH 43/54] Init fetch (#1205) * feat: wait for dutyscheduler to fetch the duties * change how init was done * fixed test and review comments * fix rest of the tests * deploy to stage * Revert "deploy to stage" This reverts commit 1ddade0b2049c1b7804dbbf6645973d35f7f91d6. 
--- operator/duties/attester_test.go | 39 +++++++++++++++++--------- operator/duties/base_handler.go | 5 ++++ operator/duties/base_handler_mock.go | 12 ++++++++ operator/duties/proposer.go | 6 ++++ operator/duties/proposer_test.go | 18 ++++++++---- operator/duties/scheduler.go | 6 ++++ operator/duties/scheduler_test.go | 21 +++++++++----- operator/duties/sync_committee.go | 11 ++++++++ operator/duties/sync_committee_test.go | 21 +++++++++----- operator/node.go | 12 ++++---- 10 files changed, 112 insertions(+), 39 deletions(-) diff --git a/operator/duties/attester_test.go b/operator/duties/attester_test.go index 4292ddf395..007e018e68 100644 --- a/operator/duties/attester_test.go +++ b/operator/duties/attester_test.go @@ -66,8 +66,9 @@ func TestScheduler_Attester_Same_Slot(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { @@ -102,8 +103,9 @@ func TestScheduler_Attester_Diff_Slots(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { @@ -143,8 +145,9 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() // STEP 1: wait for no action to be taken mockTicker.Send(currentSlot.GetSlot()) @@ -200,8 +203,9 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() // STEP 1: wait for no action to be taken mockTicker.Send(currentSlot.GetSlot()) @@ -268,8 +272,9 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(63)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall 
:= setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { @@ -348,8 +353,9 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(63)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { @@ -439,8 +445,9 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(32)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { @@ -518,8 +525,9 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(32)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { @@ -607,8 +615,9 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(47)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { @@ -694,8 +703,9 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(47)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { @@ -790,8 +800,9 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := 
setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { @@ -840,8 +851,9 @@ func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(31)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { @@ -876,8 +888,9 @@ func TestScheduler_Attester_Fetch_Execute_Next_Epoch_Duty(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(13)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { diff --git a/operator/duties/base_handler.go b/operator/duties/base_handler.go index c3c22ebbe2..f75e6a99fb 100644 --- a/operator/duties/base_handler.go +++ b/operator/duties/base_handler.go @@ -18,6 +18,7 @@ type ExecuteDutiesFunc func(logger *zap.Logger, duties []*spectypes.Duty) type dutyHandler interface { Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, slotticker.Provider, chan ReorgEvent, chan struct{}) HandleDuties(context.Context) + HandleInitialDuties(context.Context) Name() string } @@ -61,3 +62,7 @@ func (h *baseHandler) warnMisalignedSlotAndDuty(dutyType string) { h.logger.Debug("current slot and duty slot are not aligned, "+ "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", dutyType)) } + +func (b *baseHandler) HandleInitialDuties(context.Context) { + // Do nothing +} diff --git a/operator/duties/base_handler_mock.go b/operator/duties/base_handler_mock.go index 6177f369f3..4181282362 100644 --- a/operator/duties/base_handler_mock.go +++ b/operator/duties/base_handler_mock.go @@ -49,6 +49,18 @@ func (mr *MockdutyHandlerMockRecorder) HandleDuties(arg0 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleDuties", reflect.TypeOf((*MockdutyHandler)(nil).HandleDuties), arg0) } +// HandleInitialDuties mocks base method. +func (m *MockdutyHandler) HandleInitialDuties(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "HandleInitialDuties", arg0) +} + +// HandleInitialDuties indicates an expected call of HandleInitialDuties. +func (mr *MockdutyHandlerMockRecorder) HandleInitialDuties(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleInitialDuties", reflect.TypeOf((*MockdutyHandler)(nil).HandleInitialDuties), arg0) +} + // Name mocks base method. 
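// A compilable sketch of the embedding pattern that makes this change
// backwards-compatible for handlers with no startup work: baseHandler gains a
// no-op HandleInitialDuties, embedding handler types inherit it, and only the
// proposer and sync committee handlers override it. Type names below are
// illustrative, not the exact repo types.
package duties

import "context"

type baseHandler struct{}

// HandleInitialDuties is the default: nothing to fetch at startup.
func (h *baseHandler) HandleInitialDuties(context.Context) {}

// attesterHandler does not override the method, so it inherits the no-op.
type attesterHandler struct{ baseHandler }

// proposerHandler overrides the default to fetch the current epoch's duties
// synchronously before its HandleDuties loop starts.
type proposerHandler struct{ baseHandler }

func (h *proposerHandler) HandleInitialDuties(ctx context.Context) {
	// the real handler calls processFetching(ctx, currentEpoch, currentSlot)
}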
func (m *MockdutyHandler) Name() string { m.ctrl.T.Helper() diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go index d65b25b0e1..31c4aa50bb 100644 --- a/operator/duties/proposer.go +++ b/operator/duties/proposer.go @@ -106,6 +106,12 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { } } +func (h *ProposerHandler) HandleInitialDuties(ctx context.Context) { + slot := h.network.Beacon.EstimatedCurrentSlot() + epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) + h.processFetching(ctx, epoch, slot) +} + func (h *ProposerHandler) processFetching(ctx context.Context, epoch phase0.Epoch, slot phase0.Slot) { ctx, cancel := context.WithDeadline(ctx, h.network.Beacon.GetSlotStartTime(slot+1).Add(100*time.Millisecond)) defer cancel() diff --git a/operator/duties/proposer_test.go b/operator/duties/proposer_test.go index 56860c3c0e..45d13f1454 100644 --- a/operator/duties/proposer_test.go +++ b/operator/duties/proposer_test.go @@ -62,8 +62,9 @@ func TestScheduler_Proposer_Same_Slot(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { @@ -94,8 +95,9 @@ func TestScheduler_Proposer_Diff_Slots(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { @@ -136,8 +138,9 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() // STEP 1: wait for no action to be taken ticker.Send(currentSlot.GetSlot()) @@ -198,8 +201,9 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { @@ -278,8 +282,9 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(34)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, 
currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { @@ -353,8 +358,9 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(34)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { diff --git a/operator/duties/scheduler.go b/operator/duties/scheduler.go index 0ee6979ff8..70870d60b8 100644 --- a/operator/duties/scheduler.go +++ b/operator/duties/scheduler.go @@ -143,6 +143,9 @@ type ReorgEvent struct { Current bool } +// Start initializes the Scheduler and begins its operation. +// Note: This function includes blocking operations, especially within the handler's HandleInitialDuties call, +// which will block until initial duties are fully handled. func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { logger = logger.Named(logging.NameDutyScheduler) logger.Info("duty scheduler started") @@ -178,6 +181,9 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { indicesChangeCh, ) + // This call is blocking + handler.HandleInitialDuties(ctx) + s.pool.Go(func(ctx context.Context) error { // Wait for the head event subscription to complete before starting the handler. handler.HandleDuties(ctx) diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index 3a98de7e7c..ba00907f1b 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -80,6 +80,7 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot time.Duration, context.CancelFunc, *pool.ContextPool, + func(), ) { ctrl := gomock.NewController(t) // A 200ms timeout ensures the test passes, even with mockSlotTicker overhead. @@ -143,16 +144,19 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().EpochsPerSyncCommitteePeriod().Return(uint64(256)).AnyTimes() - err := s.Start(ctx, logger) - require.NoError(t, err) - // Create a pool to wait for the scheduler to finish. 
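// Why setupSchedulerAndMocks now returns a start function instead of calling
// Start itself: Start performs the initial duty fetch synchronously, so mock
// expectations must be registered before it runs. The resulting pattern, as
// used throughout these tests:
//
//	scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot)
//	fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap)
//	startFn() // calls Start and spawns Wait; blocks through HandleInitialDuties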
schedulerPool := pool.New().WithErrors().WithContext(ctx) - schedulerPool.Go(func(ctx context.Context) error { - return s.Wait() - }) - return s, logger, mockSlotService, timeout, cancel, schedulerPool + startFunction := func() { + err := s.Start(ctx, logger) + require.NoError(t, err) + + schedulerPool.Go(func(ctx context.Context) error { + return s.Wait() + }) + } + + return s, logger, mockSlotService, timeout, cancel, schedulerPool, startFunction } func setExecuteDutyFunc(s *Scheduler, executeDutiesCall chan []*spectypes.Duty, executeDutiesCallSize int) { @@ -253,6 +257,9 @@ func TestScheduler_Run(t *testing.T) { mockDutyHandler1 := NewMockdutyHandler(ctrl) mockDutyHandler2 := NewMockdutyHandler(ctrl) + mockDutyHandler1.EXPECT().HandleInitialDuties(gomock.Any()).AnyTimes() + mockDutyHandler2.EXPECT().HandleInitialDuties(gomock.Any()).AnyTimes() + opts := &SchedulerOptions{ Ctx: ctx, BeaconNode: mockBeaconNode, diff --git a/operator/duties/sync_committee.go b/operator/duties/sync_committee.go index 03c2e60037..e5fb76a25e 100644 --- a/operator/duties/sync_committee.go +++ b/operator/duties/sync_committee.go @@ -132,6 +132,17 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { } } +func (h *SyncCommitteeHandler) HandleInitialDuties(ctx context.Context) { + slot := h.network.Beacon.EstimatedCurrentSlot() + epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) + period := h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) + h.processFetching(ctx, period, slot) + // At the init time we may not have enough duties to fetch + // we should not set those values to false in processFetching() call + h.fetchNextPeriod = true + h.fetchCurrentPeriod = true +} + func (h *SyncCommitteeHandler) processFetching(ctx context.Context, period uint64, slot phase0.Slot) { ctx, cancel := context.WithDeadline(ctx, h.network.Beacon.GetSlotStartTime(slot+1).Add(100*time.Millisecond)) defer cancel() diff --git a/operator/duties/sync_committee_test.go b/operator/duties/sync_committee_test.go index b2ec6d5d8b..76d04f8a58 100644 --- a/operator/duties/sync_committee_test.go +++ b/operator/duties/sync_committee_test.go @@ -96,8 +96,9 @@ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ { @@ -155,8 +156,9 @@ func TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 49)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ { @@ -222,8 +224,9 @@ func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, 
handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ { @@ -276,8 +279,9 @@ func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() // STEP 1: wait for no action to be taken ticker.Send(currentSlot.GetSlot()) @@ -334,8 +338,9 @@ func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ { @@ -406,8 +411,9 @@ func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ { @@ -486,8 +492,9 @@ func TestScheduler_SyncCommittee_Early_Block(t *testing.T) { dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ { diff --git a/operator/node.go b/operator/node.go index 746f2ae494..55b8fde4cf 100644 --- a/operator/node.go +++ b/operator/node.go @@ -139,6 +139,12 @@ func (n *operatorNode) Start(logger *zap.Logger) error { } }() + // Start the duty scheduler, and a background goroutine to crash the node + // in case there were any errors. + if err := n.dutyScheduler.Start(n.context, logger); err != nil { + return fmt.Errorf("failed to run duty scheduler: %w", err) + } + n.validatorsCtrl.StartNetworkHandlers() n.validatorsCtrl.StartValidators() go n.net.UpdateSubnets(logger) @@ -147,12 +153,6 @@ func (n *operatorNode) Start(logger *zap.Logger) error { go n.feeRecipientCtrl.Start(logger) go n.validatorsCtrl.UpdateValidatorMetaDataLoop() - // Start the duty scheduler, and a background goroutine to crash the node - // in case there were any errors. 
- if err := n.dutyScheduler.Start(n.context, logger); err != nil { - return fmt.Errorf("failed to run duty scheduler: %w", err) - } - if err := n.dutyScheduler.Wait(); err != nil { logger.Fatal("duty scheduler exited with error", zap.Error(err)) } From 5e06ba6d6d734eba122623975b50106c01ca40ed Mon Sep 17 00:00:00 2001 From: Lior Rutenberg Date: Mon, 20 Nov 2023 13:46:05 +0100 Subject: [PATCH 44/54] disable builder proposals configs for stage nodes (#1209) --- .k8/hetzner-stage/ssv-node-1-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-10-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-11-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-12-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-13-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-14-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-15-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-16-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-17-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-18-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-19-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-2-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-20-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-21-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-22-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-23-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-24-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-25-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-26-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-27-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-28-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-29-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-3-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-30-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-31-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-32-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-33-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-34-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-35-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-36-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-37-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-38-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-39-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-4-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-40-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-41-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-42-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-43-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-44-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-45-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-46-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-47-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-48-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-49-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-5-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-50-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-51-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-52-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-53-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-54-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-55-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-56-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-57-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-58-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-59-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-6-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-60-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-61-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-62-deployment.yml | 2 +- 
.k8/hetzner-stage/ssv-node-63-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-64-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-65-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-66-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-67-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-68-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-69-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-7-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-70-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-71-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-72-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-8-deployment.yml | 2 +- .k8/hetzner-stage/ssv-node-9-deployment.yml | 2 +- 72 files changed, 72 insertions(+), 72 deletions(-) diff --git a/.k8/hetzner-stage/ssv-node-1-deployment.yml b/.k8/hetzner-stage/ssv-node-1-deployment.yml index 82717644ef..086be0d4f4 100644 --- a/.k8/hetzner-stage/ssv-node-1-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-1-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-1 diff --git a/.k8/hetzner-stage/ssv-node-10-deployment.yml b/.k8/hetzner-stage/ssv-node-10-deployment.yml index 38a9d42ef4..7f12a82051 100644 --- a/.k8/hetzner-stage/ssv-node-10-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-10-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-10 diff --git a/.k8/hetzner-stage/ssv-node-11-deployment.yml b/.k8/hetzner-stage/ssv-node-11-deployment.yml index fdba8e8e06..83b4bd283d 100644 --- a/.k8/hetzner-stage/ssv-node-11-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-11-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-11 diff --git a/.k8/hetzner-stage/ssv-node-12-deployment.yml b/.k8/hetzner-stage/ssv-node-12-deployment.yml index 39c53376d5..bb5a5364b4 100644 --- a/.k8/hetzner-stage/ssv-node-12-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-12-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-12 diff --git a/.k8/hetzner-stage/ssv-node-13-deployment.yml b/.k8/hetzner-stage/ssv-node-13-deployment.yml index 45e8cd4c4f..22b6d23514 100644 --- a/.k8/hetzner-stage/ssv-node-13-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-13-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-13 diff --git a/.k8/hetzner-stage/ssv-node-14-deployment.yml b/.k8/hetzner-stage/ssv-node-14-deployment.yml index ba020c030a..62cd8d850c 100644 --- a/.k8/hetzner-stage/ssv-node-14-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-14-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-14 diff --git a/.k8/hetzner-stage/ssv-node-15-deployment.yml b/.k8/hetzner-stage/ssv-node-15-deployment.yml index 95ccb82b6d..52393fc8b7 100644 --- a/.k8/hetzner-stage/ssv-node-15-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-15-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: 
"true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-15 diff --git a/.k8/hetzner-stage/ssv-node-16-deployment.yml b/.k8/hetzner-stage/ssv-node-16-deployment.yml index b7a2c083b2..611ac23afa 100644 --- a/.k8/hetzner-stage/ssv-node-16-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-16-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-16 diff --git a/.k8/hetzner-stage/ssv-node-17-deployment.yml b/.k8/hetzner-stage/ssv-node-17-deployment.yml index 476514b747..4dfdcbe204 100644 --- a/.k8/hetzner-stage/ssv-node-17-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-17-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-17 diff --git a/.k8/hetzner-stage/ssv-node-18-deployment.yml b/.k8/hetzner-stage/ssv-node-18-deployment.yml index c50e3a869a..3a6cc86755 100644 --- a/.k8/hetzner-stage/ssv-node-18-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-18-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-18 diff --git a/.k8/hetzner-stage/ssv-node-19-deployment.yml b/.k8/hetzner-stage/ssv-node-19-deployment.yml index a5e0fff55a..6afc020c66 100644 --- a/.k8/hetzner-stage/ssv-node-19-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-19-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-19 diff --git a/.k8/hetzner-stage/ssv-node-2-deployment.yml b/.k8/hetzner-stage/ssv-node-2-deployment.yml index 38158a10d5..3e7c411852 100644 --- a/.k8/hetzner-stage/ssv-node-2-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-2-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-2 diff --git a/.k8/hetzner-stage/ssv-node-20-deployment.yml b/.k8/hetzner-stage/ssv-node-20-deployment.yml index 2c22aa5d10..e4a1bbe9dc 100644 --- a/.k8/hetzner-stage/ssv-node-20-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-20-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-20 diff --git a/.k8/hetzner-stage/ssv-node-21-deployment.yml b/.k8/hetzner-stage/ssv-node-21-deployment.yml index cebae4fbe7..e91e607a6b 100644 --- a/.k8/hetzner-stage/ssv-node-21-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-21-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-21 diff --git a/.k8/hetzner-stage/ssv-node-22-deployment.yml b/.k8/hetzner-stage/ssv-node-22-deployment.yml index 425703ca22..587cfcb02f 100644 --- a/.k8/hetzner-stage/ssv-node-22-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-22-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-22 diff --git a/.k8/hetzner-stage/ssv-node-23-deployment.yml b/.k8/hetzner-stage/ssv-node-23-deployment.yml index 203b439712..2b8bcf79fd 100644 --- 
a/.k8/hetzner-stage/ssv-node-23-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-23-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-23 diff --git a/.k8/hetzner-stage/ssv-node-24-deployment.yml b/.k8/hetzner-stage/ssv-node-24-deployment.yml index 5a8d052145..200c013aa5 100644 --- a/.k8/hetzner-stage/ssv-node-24-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-24-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-24 diff --git a/.k8/hetzner-stage/ssv-node-25-deployment.yml b/.k8/hetzner-stage/ssv-node-25-deployment.yml index f09b90cd66..32570b1800 100644 --- a/.k8/hetzner-stage/ssv-node-25-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-25-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-25 diff --git a/.k8/hetzner-stage/ssv-node-26-deployment.yml b/.k8/hetzner-stage/ssv-node-26-deployment.yml index 0bfde7769a..e1931ba6b4 100644 --- a/.k8/hetzner-stage/ssv-node-26-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-26-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-26 diff --git a/.k8/hetzner-stage/ssv-node-27-deployment.yml b/.k8/hetzner-stage/ssv-node-27-deployment.yml index 1dc139c85a..cbda5608a0 100644 --- a/.k8/hetzner-stage/ssv-node-27-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-27-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-27 diff --git a/.k8/hetzner-stage/ssv-node-28-deployment.yml b/.k8/hetzner-stage/ssv-node-28-deployment.yml index ac89f6e95d..dd3365c183 100644 --- a/.k8/hetzner-stage/ssv-node-28-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-28-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-28 diff --git a/.k8/hetzner-stage/ssv-node-29-deployment.yml b/.k8/hetzner-stage/ssv-node-29-deployment.yml index 1193a78621..759c621ae6 100644 --- a/.k8/hetzner-stage/ssv-node-29-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-29-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-29 diff --git a/.k8/hetzner-stage/ssv-node-3-deployment.yml b/.k8/hetzner-stage/ssv-node-3-deployment.yml index 18fbc20c03..1fe286bfce 100644 --- a/.k8/hetzner-stage/ssv-node-3-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-3-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-3 diff --git a/.k8/hetzner-stage/ssv-node-30-deployment.yml b/.k8/hetzner-stage/ssv-node-30-deployment.yml index 4a5c58ab27..eed6293f41 100644 --- a/.k8/hetzner-stage/ssv-node-30-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-30-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - 
mountPath: /data name: ssv-node-30 diff --git a/.k8/hetzner-stage/ssv-node-31-deployment.yml b/.k8/hetzner-stage/ssv-node-31-deployment.yml index 00fd954c05..decc10d037 100644 --- a/.k8/hetzner-stage/ssv-node-31-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-31-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-31 diff --git a/.k8/hetzner-stage/ssv-node-32-deployment.yml b/.k8/hetzner-stage/ssv-node-32-deployment.yml index 36f1090ec3..32dcbc1587 100644 --- a/.k8/hetzner-stage/ssv-node-32-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-32-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-32 diff --git a/.k8/hetzner-stage/ssv-node-33-deployment.yml b/.k8/hetzner-stage/ssv-node-33-deployment.yml index 47a0b113f6..7bb8ee072e 100644 --- a/.k8/hetzner-stage/ssv-node-33-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-33-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-33 diff --git a/.k8/hetzner-stage/ssv-node-34-deployment.yml b/.k8/hetzner-stage/ssv-node-34-deployment.yml index 387a834820..5ec7a5c1b6 100644 --- a/.k8/hetzner-stage/ssv-node-34-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-34-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-34 diff --git a/.k8/hetzner-stage/ssv-node-35-deployment.yml b/.k8/hetzner-stage/ssv-node-35-deployment.yml index 043ddafb9b..6430c698f5 100644 --- a/.k8/hetzner-stage/ssv-node-35-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-35-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-35 diff --git a/.k8/hetzner-stage/ssv-node-36-deployment.yml b/.k8/hetzner-stage/ssv-node-36-deployment.yml index b2c2e0a89d..a91c4fd23a 100644 --- a/.k8/hetzner-stage/ssv-node-36-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-36-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-36 diff --git a/.k8/hetzner-stage/ssv-node-37-deployment.yml b/.k8/hetzner-stage/ssv-node-37-deployment.yml index 25b640e23e..ef6fe88f03 100644 --- a/.k8/hetzner-stage/ssv-node-37-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-37-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-37 diff --git a/.k8/hetzner-stage/ssv-node-38-deployment.yml b/.k8/hetzner-stage/ssv-node-38-deployment.yml index 7f4858f2de..c2949533e8 100644 --- a/.k8/hetzner-stage/ssv-node-38-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-38-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-38 diff --git a/.k8/hetzner-stage/ssv-node-39-deployment.yml b/.k8/hetzner-stage/ssv-node-39-deployment.yml index 322bca8ede..9b5e0dd6d5 100644 --- a/.k8/hetzner-stage/ssv-node-39-deployment.yml +++ 
b/.k8/hetzner-stage/ssv-node-39-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-39 diff --git a/.k8/hetzner-stage/ssv-node-4-deployment.yml b/.k8/hetzner-stage/ssv-node-4-deployment.yml index 0f713f256e..e14bf0186d 100644 --- a/.k8/hetzner-stage/ssv-node-4-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-4-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-4 diff --git a/.k8/hetzner-stage/ssv-node-40-deployment.yml b/.k8/hetzner-stage/ssv-node-40-deployment.yml index baa40ea8b1..ab0f8f974a 100644 --- a/.k8/hetzner-stage/ssv-node-40-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-40-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-40 diff --git a/.k8/hetzner-stage/ssv-node-41-deployment.yml b/.k8/hetzner-stage/ssv-node-41-deployment.yml index a066f20316..cc177afcf1 100644 --- a/.k8/hetzner-stage/ssv-node-41-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-41-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-41 diff --git a/.k8/hetzner-stage/ssv-node-42-deployment.yml b/.k8/hetzner-stage/ssv-node-42-deployment.yml index 9a90886eda..635b268042 100644 --- a/.k8/hetzner-stage/ssv-node-42-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-42-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-42 diff --git a/.k8/hetzner-stage/ssv-node-43-deployment.yml b/.k8/hetzner-stage/ssv-node-43-deployment.yml index c6c08613de..4731455412 100644 --- a/.k8/hetzner-stage/ssv-node-43-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-43-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-43 diff --git a/.k8/hetzner-stage/ssv-node-44-deployment.yml b/.k8/hetzner-stage/ssv-node-44-deployment.yml index b533b6bcfc..b9b8b0c5a4 100644 --- a/.k8/hetzner-stage/ssv-node-44-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-44-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-44 diff --git a/.k8/hetzner-stage/ssv-node-45-deployment.yml b/.k8/hetzner-stage/ssv-node-45-deployment.yml index dd4e94430d..6636204199 100644 --- a/.k8/hetzner-stage/ssv-node-45-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-45-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-45 diff --git a/.k8/hetzner-stage/ssv-node-46-deployment.yml b/.k8/hetzner-stage/ssv-node-46-deployment.yml index 8ce5fc8625..9abe08db3b 100644 --- a/.k8/hetzner-stage/ssv-node-46-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-46-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-46 diff --git 
a/.k8/hetzner-stage/ssv-node-47-deployment.yml b/.k8/hetzner-stage/ssv-node-47-deployment.yml index 20f13789b7..89964d30fb 100644 --- a/.k8/hetzner-stage/ssv-node-47-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-47-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-47 diff --git a/.k8/hetzner-stage/ssv-node-48-deployment.yml b/.k8/hetzner-stage/ssv-node-48-deployment.yml index e750831e12..843835dd40 100644 --- a/.k8/hetzner-stage/ssv-node-48-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-48-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-48 diff --git a/.k8/hetzner-stage/ssv-node-49-deployment.yml b/.k8/hetzner-stage/ssv-node-49-deployment.yml index 350802f021..5a557185dc 100644 --- a/.k8/hetzner-stage/ssv-node-49-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-49-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-49 diff --git a/.k8/hetzner-stage/ssv-node-5-deployment.yml b/.k8/hetzner-stage/ssv-node-5-deployment.yml index c94a30acf9..94d184dec0 100644 --- a/.k8/hetzner-stage/ssv-node-5-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-5-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-5 diff --git a/.k8/hetzner-stage/ssv-node-50-deployment.yml b/.k8/hetzner-stage/ssv-node-50-deployment.yml index 4e2bbebce8..0099320434 100644 --- a/.k8/hetzner-stage/ssv-node-50-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-50-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-50 diff --git a/.k8/hetzner-stage/ssv-node-51-deployment.yml b/.k8/hetzner-stage/ssv-node-51-deployment.yml index 7e46ea6560..7933ca7218 100644 --- a/.k8/hetzner-stage/ssv-node-51-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-51-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-51 diff --git a/.k8/hetzner-stage/ssv-node-52-deployment.yml b/.k8/hetzner-stage/ssv-node-52-deployment.yml index c676691f8c..46a23039c7 100644 --- a/.k8/hetzner-stage/ssv-node-52-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-52-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-52 diff --git a/.k8/hetzner-stage/ssv-node-53-deployment.yml b/.k8/hetzner-stage/ssv-node-53-deployment.yml index 678301bc85..373b3fd9a5 100644 --- a/.k8/hetzner-stage/ssv-node-53-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-53-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-53 diff --git a/.k8/hetzner-stage/ssv-node-54-deployment.yml b/.k8/hetzner-stage/ssv-node-54-deployment.yml index c91ec66e42..fa81104af3 100644 --- a/.k8/hetzner-stage/ssv-node-54-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-54-deployment.yml @@ 
-115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-54 diff --git a/.k8/hetzner-stage/ssv-node-55-deployment.yml b/.k8/hetzner-stage/ssv-node-55-deployment.yml index f2b44873e0..0c5f96d861 100644 --- a/.k8/hetzner-stage/ssv-node-55-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-55-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-55 diff --git a/.k8/hetzner-stage/ssv-node-56-deployment.yml b/.k8/hetzner-stage/ssv-node-56-deployment.yml index bdb312eb7e..4980e56786 100644 --- a/.k8/hetzner-stage/ssv-node-56-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-56-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-56 diff --git a/.k8/hetzner-stage/ssv-node-57-deployment.yml b/.k8/hetzner-stage/ssv-node-57-deployment.yml index 3eff5b03c4..6fea9bd5f3 100644 --- a/.k8/hetzner-stage/ssv-node-57-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-57-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-57 diff --git a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml index cdaf3bcb23..ba1175da79 100644 --- a/.k8/hetzner-stage/ssv-node-58-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-58 diff --git a/.k8/hetzner-stage/ssv-node-59-deployment.yml b/.k8/hetzner-stage/ssv-node-59-deployment.yml index b7b1861792..2232660448 100644 --- a/.k8/hetzner-stage/ssv-node-59-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-59-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-59 diff --git a/.k8/hetzner-stage/ssv-node-6-deployment.yml b/.k8/hetzner-stage/ssv-node-6-deployment.yml index 3bfbc7ed26..945c0a7779 100644 --- a/.k8/hetzner-stage/ssv-node-6-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-6-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-6 diff --git a/.k8/hetzner-stage/ssv-node-60-deployment.yml b/.k8/hetzner-stage/ssv-node-60-deployment.yml index a7a7285a6d..28536a9fd9 100644 --- a/.k8/hetzner-stage/ssv-node-60-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-60-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-60 diff --git a/.k8/hetzner-stage/ssv-node-61-deployment.yml b/.k8/hetzner-stage/ssv-node-61-deployment.yml index 6ac244e496..a4802318f7 100644 --- a/.k8/hetzner-stage/ssv-node-61-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-61-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-61 diff --git a/.k8/hetzner-stage/ssv-node-62-deployment.yml 
b/.k8/hetzner-stage/ssv-node-62-deployment.yml index d257378b74..3ceb7303cf 100644 --- a/.k8/hetzner-stage/ssv-node-62-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-62-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-62 diff --git a/.k8/hetzner-stage/ssv-node-63-deployment.yml b/.k8/hetzner-stage/ssv-node-63-deployment.yml index 43912423b9..e445668038 100644 --- a/.k8/hetzner-stage/ssv-node-63-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-63-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-63 diff --git a/.k8/hetzner-stage/ssv-node-64-deployment.yml b/.k8/hetzner-stage/ssv-node-64-deployment.yml index 3a9f0fa5e0..41622ca0be 100644 --- a/.k8/hetzner-stage/ssv-node-64-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-64-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-64 diff --git a/.k8/hetzner-stage/ssv-node-65-deployment.yml b/.k8/hetzner-stage/ssv-node-65-deployment.yml index 837cff925a..5390b92509 100644 --- a/.k8/hetzner-stage/ssv-node-65-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-65-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-65 diff --git a/.k8/hetzner-stage/ssv-node-66-deployment.yml b/.k8/hetzner-stage/ssv-node-66-deployment.yml index f76842606c..d6e86a35bc 100644 --- a/.k8/hetzner-stage/ssv-node-66-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-66-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-66 diff --git a/.k8/hetzner-stage/ssv-node-67-deployment.yml b/.k8/hetzner-stage/ssv-node-67-deployment.yml index d9305f5293..047e8c1b84 100644 --- a/.k8/hetzner-stage/ssv-node-67-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-67-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-67 diff --git a/.k8/hetzner-stage/ssv-node-68-deployment.yml b/.k8/hetzner-stage/ssv-node-68-deployment.yml index 566fcdc221..777a5ca07a 100644 --- a/.k8/hetzner-stage/ssv-node-68-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-68-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-68 diff --git a/.k8/hetzner-stage/ssv-node-69-deployment.yml b/.k8/hetzner-stage/ssv-node-69-deployment.yml index 29f507ea95..9d7fd2bbaa 100644 --- a/.k8/hetzner-stage/ssv-node-69-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-69-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-69 diff --git a/.k8/hetzner-stage/ssv-node-7-deployment.yml b/.k8/hetzner-stage/ssv-node-7-deployment.yml index 8493eb8870..358fa28811 100644 --- a/.k8/hetzner-stage/ssv-node-7-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-7-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 
'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-7 diff --git a/.k8/hetzner-stage/ssv-node-70-deployment.yml b/.k8/hetzner-stage/ssv-node-70-deployment.yml index a501a099ed..5649051e6c 100644 --- a/.k8/hetzner-stage/ssv-node-70-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-70-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-70 diff --git a/.k8/hetzner-stage/ssv-node-71-deployment.yml b/.k8/hetzner-stage/ssv-node-71-deployment.yml index b6c1bfa74b..ea98ca411c 100644 --- a/.k8/hetzner-stage/ssv-node-71-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-71-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-71 diff --git a/.k8/hetzner-stage/ssv-node-72-deployment.yml b/.k8/hetzner-stage/ssv-node-72-deployment.yml index 33c5a2d0ce..ba1a8d2f07 100644 --- a/.k8/hetzner-stage/ssv-node-72-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-72-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-72 diff --git a/.k8/hetzner-stage/ssv-node-8-deployment.yml b/.k8/hetzner-stage/ssv-node-8-deployment.yml index 1f13447479..a19ef9795b 100644 --- a/.k8/hetzner-stage/ssv-node-8-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-8-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-8 diff --git a/.k8/hetzner-stage/ssv-node-9-deployment.yml b/.k8/hetzner-stage/ssv-node-9-deployment.yml index 04f979521c..c2868e46c5 100644 --- a/.k8/hetzner-stage/ssv-node-9-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-9-deployment.yml @@ -115,7 +115,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-9 From f37d359d1535b6b7cada8dfe7b6c06d6a31f5e7d Mon Sep 17 00:00:00 2001 From: rehs0y Date: Mon, 20 Nov 2023 15:05:10 +0200 Subject: [PATCH 45/54] Feat/ log branch and commit ver (#1208) * add branch and commit to build data * CR comments about better log formatting. 
* fix log to not include ssv-node twice --- Makefile | 2 +- cli/bootnode/boot_node.go | 7 +++++++ cli/operator/node.go | 12 +++++++----- cmd/ssvnode/main.go | 20 +++++++++++++------- 4 files changed, 28 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index 9fc80cecde..62122fa53b 100644 --- a/Makefile +++ b/Makefile @@ -90,7 +90,7 @@ docker-integration-test: #Build .PHONY: build build: - CGO_ENABLED=1 go build -o ./bin/ssvnode -ldflags "-X main.Version=`git describe --tags $(git rev-list --tags --max-count=1)`" ./cmd/ssvnode/ + CGO_ENABLED=1 go build -o ./bin/ssvnode -ldflags "-X main.Commit=`git rev-parse HEAD` -X main.Branch=`git symbolic-ref --short HEAD` -X main.Version=`git describe --tags $(git rev-list --tags --max-count=1)`" ./cmd/ssvnode/ .PHONY: start-node start-node: diff --git a/cli/bootnode/boot_node.go b/cli/bootnode/boot_node.go index ddf69d71e4..ae9a767aa7 100644 --- a/cli/bootnode/boot_node.go +++ b/cli/bootnode/boot_node.go @@ -1,6 +1,8 @@ package bootnode import ( + "fmt" + "github.com/bloxapp/ssv/utils/commons" "log" "github.com/bloxapp/ssv/logging" @@ -27,6 +29,8 @@ var StartBootNodeCmd = &cobra.Command{ Use: "start-boot-node", Short: "Starts boot node for discovery based ENR", Run: func(cmd *cobra.Command, args []string) { + commons.SetBuildData(cmd.Parent().Short, cmd.Parent().Version) + if err := cleanenv.ReadConfig(globalArgs.ConfigPath, &cfg); err != nil { log.Fatal(err) } @@ -41,12 +45,15 @@ var StartBootNodeCmd = &cobra.Command{ MaxBackups: cfg.LogFileBackups, }, ) + if err != nil { log.Fatal(err) } logger := zap.L() + logger.Info(fmt.Sprintf("starting %v", commons.GetBuildData())) + bootNode, err := bootnode.New(cfg.Options) if err != nil { logger.Fatal("failed to set up boot node", zap.Error(err)) diff --git a/cli/operator/node.go b/cli/operator/node.go index 5d661179e6..5fdb49e192 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -93,12 +93,17 @@ var StartNodeCmd = &cobra.Command{ Use: "start-node", Short: "Starts an instance of SSV node", Run: func(cmd *cobra.Command, args []string) { - logger, err := setupGlobal(cmd) + commons.SetBuildData(cmd.Parent().Short, cmd.Parent().Version) + + logger, err := setupGlobal() if err != nil { log.Fatal("could not create logger", err) } + defer logging.CapturePanic(logger) + logger.Info(fmt.Sprintf("starting %v", commons.GetBuildData())) + metricsReporter := metricsreporter.New( metricsreporter.WithLogger(logger), ) @@ -341,10 +346,7 @@ func init() { global_config.ProcessArgs(&cfg, &globalArgs, StartNodeCmd) } -func setupGlobal(cmd *cobra.Command) (*zap.Logger, error) { - commons.SetBuildData(cmd.Parent().Short, cmd.Parent().Version) - log.Printf("starting SSV node (version %s)", commons.GetBuildData()) - +func setupGlobal() (*zap.Logger, error) { if globalArgs.ConfigPath != "" { if err := cleanenv.ReadConfig(globalArgs.ConfigPath, &cfg); err != nil { return nil, fmt.Errorf("could not read config: %w", err) diff --git a/cmd/ssvnode/main.go b/cmd/ssvnode/main.go index 21e68f5e19..621dc8b83c 100644 --- a/cmd/ssvnode/main.go +++ b/cmd/ssvnode/main.go @@ -1,17 +1,23 @@ package main import ( + "fmt" "github.com/bloxapp/ssv/cli" ) -var ( - // AppName is the application name - AppName = "SSV-Node" +// AppName is the application name +var AppName = "SSV-Node" - // Version is the app version - Version = "latest" -) +// Version is the app version +var Version = "latest" + +// Branch is the git branch this version was built on +var Branch = "main" + +// Commit is the git commit this version was built on 
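// Version, Branch and Commit are link-time injection points: the Makefile
// change above stamps them with -ldflags "-X importpath.name=value", which
// works only for package-level string variables. An illustrative manual
// invocation (values are examples, not project defaults):
//
//	go build -ldflags "-X main.Commit=$(git rev-parse HEAD) \
//		-X main.Branch=$(git symbolic-ref --short HEAD)" ./cmd/ssvnode/
//
// Any variable not overridden keeps the default literal declared here.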
+var Commit = "unknown" func main() { - cli.Execute(AppName, Version) + version := fmt.Sprintf("%s-%s-%s", Version, Branch, Commit) + cli.Execute(AppName, version) } From 33e3506a923e2cd035cab8285eb323e24aad918b Mon Sep 17 00:00:00 2001 From: rehs0y Date: Mon, 20 Nov 2023 22:00:09 +0200 Subject: [PATCH 46/54] Score parameters for banning peers sending rejected messages (#1206) * Modify gossipsub score parameters * Comment unused * Fix infinity value * Fix P3 weight * wip * fix lint * revert gitlab * fix test according to new keys functions * delete batch verifier * fix messages for the test * fix races in test * more data races * Bring back opkhash metric * lock for the table as well * don't report metrics in tests * don't panic when calling list ops * Refactor score params * Increase positive score increment * make sure all tests stop using resources * Remove unused. Use config value * peer score logs only topics with rejected msgs * deploy for testing * don't deploy anything * show peer score only every 5 minutes --------- Co-authored-by: MatheusFranco99 <48058141+MatheusFranco99@users.noreply.github.com> Co-authored-by: moshe-blox Co-authored-by: Lior Rutenberg --- go.mod | 1 + go.sum | 2 + integration/qbft/tests/setup_test.go | 6 +- logging/fields/fields.go | 5 + message/validation/validation.go | 17 + network/p2p/config.go | 8 + network/p2p/metrics.go | 1 + network/p2p/p2p.go | 10 +- network/p2p/p2p_setup.go | 5 + network/p2p/p2p_test.go | 75 ++-- network/p2p/p2p_validation_test.go | 372 ++++++++++++++++++ network/p2p/test_utils.go | 71 +++- .../peers/connections/mock/mock_storage.go | 2 +- network/testing/local.go | 4 +- network/topics/params/helpers.go | 12 +- network/topics/params/peer_score.go | 83 ++-- network/topics/params/scores_test.go | 2 +- network/topics/params/topic_score.go | 236 +++++++---- network/topics/pubsub.go | 4 +- network/topics/scoring.go | 28 +- 20 files changed, 750 insertions(+), 194 deletions(-) create mode 100644 network/p2p/p2p_validation_test.go diff --git a/go.mod b/go.mod index 7be627fb25..c40bccbcde 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/herumi/bls-eth-go-binary v1.29.1 github.com/ilyakaznacheev/cleanenv v1.4.2 + github.com/jamiealquiza/tachymeter v2.0.0+incompatible github.com/jellydator/ttlcache/v3 v3.0.1 github.com/libp2p/go-libp2p v0.28.2 github.com/libp2p/go-libp2p-kad-dht v0.23.0 diff --git a/go.sum b/go.sum index 527b7eea50..5eb22ec3c0 100644 --- a/go.sum +++ b/go.sum @@ -367,6 +367,8 @@ github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0Gqw github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jamiealquiza/tachymeter v2.0.0+incompatible h1:mGiF1DGo8l6vnGT8FXNNcIXht/YmjzfraiUprXYwJ6g= +github.com/jamiealquiza/tachymeter v2.0.0+incompatible/go.mod h1:Ayf6zPZKEnLsc3winWEXJRkTBhdHo58HODAu1oFJkYU= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= diff --git a/integration/qbft/tests/setup_test.go b/integration/qbft/tests/setup_test.go index f8c4222dbc..d319c44793 100644 ---
a/integration/qbft/tests/setup_test.go +++ b/integration/qbft/tests/setup_test.go @@ -42,7 +42,11 @@ func TestMain(m *testing.M) { types.SetDefaultDomain(testingutils.TestingSSVDomainType) - ln, err := p2pv1.CreateAndStartLocalNet(ctx, logger, maxSupportedCommittee, maxSupportedQuorum, false) + ln, err := p2pv1.CreateAndStartLocalNet(ctx, logger, p2pv1.LocalNetOptions{ + Nodes: maxSupportedCommittee, + MinConnected: maxSupportedQuorum, + UseDiscv5: false, + }) if err != nil { logger.Fatal("error creating and start local net", zap.Error(err)) return diff --git a/logging/fields/fields.go b/logging/fields/fields.go index 3584f07915..94ca995621 100644 --- a/logging/fields/fields.go +++ b/logging/fields/fields.go @@ -68,6 +68,7 @@ const ( FieldOperatorPubKey = "operator_pubkey" FieldOwnerAddress = "owner_address" FieldPeerID = "peer_id" + FieldPeerScore = "peer_score" FieldPrivKey = "privkey" FieldPubKey = "pubkey" FieldRole = "role" @@ -155,6 +156,10 @@ func PeerID(val peer.ID) zapcore.Field { return zap.Stringer(FieldPeerID, val) } +func PeerScore(val float64) zapcore.Field { + return zap.Stringer(FieldPeerScore, stringer.Float64Stringer{Val: val}) +} + func BindIP(val net.IP) zapcore.Field { return zap.Stringer(FieldBindIP, val) } diff --git a/message/validation/validation.go b/message/validation/validation.go index d981e24e68..dfaa93c6f9 100644 --- a/message/validation/validation.go +++ b/message/validation/validation.go @@ -77,6 +77,8 @@ type messageValidator struct { dutyStore *dutystore.Store ownOperatorID spectypes.OperatorID operatorIDToPubkeyCache *hashmap.Map[spectypes.OperatorID, *rsa.PublicKey] + selfPID peer.ID + selfAccept bool } // NewMessageValidator returns a new MessageValidator with the given network configuration and options. @@ -133,6 +135,14 @@ func WithNodeStorage(nodeStorage operatorstorage.Storage) Option { } } +// WithSelfAccept blindly accepts messages sent from self. Useful for testing. +func WithSelfAccept(selfPID peer.ID, selfAccept bool) Option { + return func(mv *messageValidator) { + mv.selfPID = selfPID + mv.selfAccept = selfAccept + } +} + // ConsensusDescriptor provides details about the consensus for a message. It's used for logging and metrics. type ConsensusDescriptor struct { Round specqbft.Round @@ -212,6 +222,13 @@ func (mv *messageValidator) ValidatorForTopic(_ string) func(ctx context.Context // ValidatePubsubMessage validates the given pubsub message. // Depending on the outcome, it will return one of the pubsub validation results (Accept, Ignore, or Reject). 
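// In gossipsub terms (background, summarized from the libp2p docs): Accept
// delivers the message locally and propagates it to peers; Ignore drops it
// with no effect on the sender's score; Reject drops it and counts an
// invalid message delivery against the sender, the penalty that the score
// parameters introduced in this PR use to ban misbehaving peers.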
func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, peerID peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + if mv.selfAccept && peerID == mv.selfPID { + msg, _ := commons.DecodeNetworkMsg(pmsg.Data) + decMsg, _ := queue.DecodeSSVMessage(msg) + pmsg.ValidatorData = decMsg + return pubsub.ValidationAccept + } + start := time.Now() var validationDurationLabels []string // TODO: implement diff --git a/network/p2p/config.go b/network/p2p/config.go index 70829f200e..9dd29ca7fc 100644 --- a/network/p2p/config.go +++ b/network/p2p/config.go @@ -10,6 +10,8 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/security/noise" libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp" ma "github.com/multiformats/go-multiaddr" @@ -87,6 +89,12 @@ type Config struct { GetValidatorStats network.GetValidatorStats Permissioned func() bool // this is not loaded from config file but set up in full node setup + + // PeerScoreInspector is called periodically to inspect the peer scores. + PeerScoreInspector func(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) + + // PeerScoreInspectorInterval is the interval at which the PeerScoreInspector is called. + PeerScoreInspectorInterval time.Duration } // Libp2pOptions creates options list for the libp2p host diff --git a/network/p2p/metrics.go b/network/p2p/metrics.go index 8073ce1cb6..10ba41304e 100644 --- a/network/p2p/metrics.go +++ b/network/p2p/metrics.go @@ -96,6 +96,7 @@ func (n *p2pNetwork) reportPeerIdentity(logger *zap.Logger, pid peer.ID) { ni := n.idx.NodeInfo(pid) if ni != nil { if ni.Metadata != nil { + opPKHash = ni.Metadata.OperatorID nodeVersion = ni.Metadata.NodeVersion } nodeType = "operator" diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 8e455d3701..e959fab776 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -165,12 +165,14 @@ func (n *p2pNetwork) Start(logger *zap.Logger) error { go n.startDiscovery(logger) async.Interval(n.ctx, connManagerGCInterval, n.peersBalancing(logger)) + // don't report metrics in tests + if n.cfg.Metrics != nil { + async.Interval(n.ctx, peersReportingInterval, n.reportAllPeers(logger)) - async.Interval(n.ctx, peersReportingInterval, n.reportAllPeers(logger)) + async.Interval(n.ctx, peerIdentitiesReportingInterval, n.reportPeerIdentities(logger)) - async.Interval(n.ctx, peerIdentitiesReportingInterval, n.reportPeerIdentities(logger)) - - async.Interval(n.ctx, topicsReportingInterval, n.reportTopics(logger)) + async.Interval(n.ctx, topicsReportingInterval, n.reportTopics(logger)) + } if err := n.subscribeToSubnets(logger); err != nil { return err diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index e3aaaa9a71..f680096f8b 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -289,6 +289,11 @@ func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { GetValidatorStats: n.cfg.GetValidatorStats, } + if n.cfg.PeerScoreInspector != nil && n.cfg.PeerScoreInspectorInterval > 0 { + cfg.ScoreInspector = n.cfg.PeerScoreInspector + cfg.ScoreInspectorInterval = n.cfg.PeerScoreInspectorInterval + } + if !n.cfg.PubSubScoring { cfg.ScoreIndex = nil } diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index f7275dc094..208a8bed4a 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -96,22 +96,29 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { pks := 
[]string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170"} - - ln, routers, err := createNetworkAndSubscribe(t, ctx, n, pks...) + ln, routers, err := createNetworkAndSubscribe(t, ctx, LocalNetOptions{ + Nodes: n, + MinConnected: n/2 - 1, + UseDiscv5: false, + }, pks...) require.NoError(t, err) require.NotNil(t, routers) require.NotNil(t, ln) + defer func() { + for _, node := range ln.Nodes { + require.NoError(t, node.(*p2pNetwork).Close()) + } + }() + node1, node2 := ln.Nodes[1], ln.Nodes[2] var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - msg1, err := dummyMsg(pks[0], 1) - require.NoError(t, err) - msg3, err := dummyMsg(pks[0], 3) - require.NoError(t, err) + msg1 := dummyMsgAttester(t, pks[0], 1) + msg3 := dummyMsgAttester(t, pks[0], 3) require.NoError(t, node1.Broadcast(msg1)) <-time.After(time.Millisecond * 10) require.NoError(t, node2.Broadcast(msg3)) @@ -122,11 +129,9 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - msg1, err := dummyMsg(pks[0], 1) - require.NoError(t, err) - msg2, err := dummyMsg(pks[1], 2) - require.NoError(t, err) - msg3, err := dummyMsg(pks[0], 3) + msg1 := dummyMsgAttester(t, pks[0], 1) + msg2 := dummyMsgAttester(t, pks[1], 2) + msg3 := dummyMsgAttester(t, pks[0], 3) require.NoError(t, err) <-time.After(time.Millisecond * 10) require.NoError(t, node1.Broadcast(msg2)) @@ -156,10 +161,6 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { } <-time.After(time.Millisecond * 10) - - for _, node := range ln.Nodes { - require.NoError(t, node.(*p2pNetwork).Close()) - } } func TestP2pNetwork_Stream(t *testing.T) { @@ -170,7 +171,17 @@ func TestP2pNetwork_Stream(t *testing.T) { pkHex := "b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400" - ln, _, err := createNetworkAndSubscribe(t, ctx, n, pkHex) + ln, _, err := createNetworkAndSubscribe(t, ctx, LocalNetOptions{ + Nodes: n, + MinConnected: n/2 - 1, + UseDiscv5: false, + }, pkHex) + + defer func() { + for _, node := range ln.Nodes { + require.NoError(t, node.(*p2pNetwork).Close()) + } + }() require.NoError(t, err) require.Len(t, ln.Nodes, n) @@ -208,6 +219,7 @@ func TestP2pNetwork_Stream(t *testing.T) { require.GreaterOrEqual(t, len(res), 2) // got at least 2 results require.LessOrEqual(t, len(res), 6) // less than 6 unique heights require.GreaterOrEqual(t, msgCounter, int64(2)) + } func TestWaitSubsetOfPeers(t *testing.T) { @@ -314,19 +326,20 @@ func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes. 
}) } -func createNetworkAndSubscribe(t *testing.T, ctx context.Context, nodes int, pks ...string) (*LocalNet, []*dummyRouter, error) { - logger := logging.TestLogger(t) - ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), nodes, nodes/2-1, false) +func createNetworkAndSubscribe(t *testing.T, ctx context.Context, options LocalNetOptions, pks ...string) (*LocalNet, []*dummyRouter, error) { + logger, err := zap.NewDevelopment() + require.NoError(t, err) + ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), options) if err != nil { return nil, nil, err } - if len(ln.Nodes) != nodes { - return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), nodes) + if len(ln.Nodes) != options.Nodes { + return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), options.Nodes) } logger.Debug("created local network") - routers := make([]*dummyRouter, nodes) + routers := make([]*dummyRouter, options.Nodes) for i, node := range ln.Nodes { routers[i] = &dummyRouter{ i: i, @@ -384,12 +397,10 @@ func (r *dummyRouter) Route(_ context.Context, _ *queue.DecodedSSVMessage) { atomic.AddUint64(&r.count, 1) } -func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { pk, err := hex.DecodeString(pkHex) - if err != nil { - return nil, err - } - id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) +func dummyMsg(t *testing.T, pkHex string, height int, role spectypes.BeaconRole) *spectypes.SSVMessage { pk, err := hex.DecodeString(pkHex) require.NoError(t, err) id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, role) signedMsg := &specqbft.SignedMessage{ Message: specqbft.Message{ MsgType: specqbft.CommitMsgType, @@ -402,12 +413,14 @@ func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { Signers: []spectypes.OperatorID{1, 3, 4}, } data, err := signedMsg.Encode() - if err != nil { - return nil, err - } + require.NoError(t, err) return &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, Data: data, - }, nil + } +} + +func dummyMsgAttester(t *testing.T, pkHex string, height int) *spectypes.SSVMessage { + return dummyMsg(t, pkHex, height, spectypes.BNRoleAttester) } diff --git a/network/p2p/p2p_validation_test.go b/network/p2p/p2p_validation_test.go new file mode 100644 index 0000000000..2d8292a9ed --- /dev/null +++ b/network/p2p/p2p_validation_test.go @@ -0,0 +1,372 @@ +package p2pv1 + +import ( + "context" + cryptorand "crypto/rand" + "encoding/hex" + "fmt" + "github.com/cornelk/hashmap" + "os" + "sort" + "sync" + "sync/atomic" + "testing" + "time" + + "math/rand" + + "github.com/aquasecurity/table" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcegraph/conc/pool" + "github.com/stretchr/testify/require" +) + +// TestP2pNetwork_MessageValidation tests that p2pNetwork would score peers according +// to the validity of the messages they broadcast. +// +// This test creates 4 nodes, each fulfilling a different role by broadcasting +// messages that would be accepted, ignored or rejected by the other nodes, +// and finally asserts that each node scores its peers according to their +// played role (accepted > ignored > rejected).
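// Why that ordering is expected (hedged summary of gossipsub v1.1 scoring):
// accepted first-deliveries earn positive mesh-delivery credit (P2), ignored
// messages are score-neutral, and rejected messages accrue the squared
// negative invalid-delivery penalty (P4).
//
// The per-peer score snapshots asserted below come from go-libp2p-pubsub's
// score inspector; a minimal wiring sketch (assumed names — this repo routes
// it through Config.PeerScoreInspector instead):
//
//	ps, err := pubsub.NewGossipSub(ctx, host,
//		pubsub.WithPeerScore(peerScoreParams, peerScoreThresholds),
//		pubsub.WithPeerScoreInspect(func(m map[peer.ID]*pubsub.PeerScoreSnapshot) {
//			// copy the snapshots somewhere the test can read them
//		}, 5*time.Millisecond),
//	)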
+func TestP2pNetwork_MessageValidation(t *testing.T) { + const ( + nodeCount = 4 + validatorCount = 20 + ) + var vNet *VirtualNet + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create 20 fake validator public keys. + validators := make([]string, validatorCount) + for i := 0; i < validatorCount; i++ { + var validator [48]byte + cryptorand.Read(validator[:]) + validators[i] = hex.EncodeToString(validator[:]) + } + + // Create a MessageValidator to accept/reject/ignore messages according to their role type. + const ( + acceptedRole = spectypes.BNRoleProposer + ignoredRole = spectypes.BNRoleAttester + rejectedRole = spectypes.BNRoleSyncCommittee + ) + messageValidators := make([]*MockMessageValidator, nodeCount) + var mtx sync.Mutex + for i := 0; i < nodeCount; i++ { + i := i + messageValidators[i] = &MockMessageValidator{ + Accepted: make([]int, nodeCount), + Ignored: make([]int, nodeCount), + Rejected: make([]int, nodeCount), + } + messageValidators[i].ValidateFunc = func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + peer := vNet.NodeByPeerID(p) + + msg, err := commons.DecodeNetworkMsg(pmsg.Data) + require.NoError(t, err) + decodedMsg, err := queue.DecodeSSVMessage(msg) + require.NoError(t, err) + pmsg.ValidatorData = decodedMsg + mtx.Lock() + // Validation according to role. + var validation pubsub.ValidationResult + switch msg.MsgID.GetRoleType() { + case acceptedRole: + messageValidators[i].Accepted[peer.Index]++ + messageValidators[i].TotalAccepted++ + validation = pubsub.ValidationAccept + case ignoredRole: + messageValidators[i].Ignored[peer.Index]++ + messageValidators[i].TotalIgnored++ + validation = pubsub.ValidationIgnore + case rejectedRole: + messageValidators[i].Rejected[peer.Index]++ + messageValidators[i].TotalRejected++ + validation = pubsub.ValidationReject + default: + panic("unsupported role") + } + mtx.Unlock() + + // Always accept messages from self to make libp2p propagate them, + // while still counting them by their role. + if p == vNet.Nodes[i].Network.Host().ID() { + return pubsub.ValidationAccept + } + + return validation + } + } + + // Create a VirtualNet with 4 nodes. + vNet = CreateVirtualNet(t, ctx, 4, validators, func(nodeIndex int) validation.MessageValidator { + return messageValidators[nodeIndex] + }) + defer func() { + require.NoError(t, vNet.Close()) + }() + + // Prepare a pool of broadcasters. + mu := sync.Mutex{} + height := atomic.Int64{} + roleBroadcasts := map[spectypes.BeaconRole]int{} + broadcasters := pool.New().WithErrors().WithContext(ctx) + broadcaster := func(node *VirtualNode, roles ...spectypes.BeaconRole) { + broadcasters.Go(func(ctx context.Context) error { + for i := 0; i < 50; i++ { + role := roles[i%len(roles)] + + mu.Lock() + roleBroadcasts[role]++ + mu.Unlock() + + msg := dummyMsg(t, validators[rand.Intn(len(validators))], int(height.Add(1)), role) + err := node.Broadcast(msg) + if err != nil { + return err + } + time.Sleep(10 * time.Millisecond) + } + return nil + }) + } + + // Broadcast the messages: + // - node 0 broadcasts accepted messages. + // - node 1 broadcasts ignored messages. + // - node 2 broadcasts rejected messages. + // - node 3 broadcasts all messages (equal distribution). + broadcaster(vNet.Nodes[0], acceptedRole) + broadcaster(vNet.Nodes[1], ignoredRole) + broadcaster(vNet.Nodes[2], rejectedRole) + broadcaster(vNet.Nodes[3], acceptedRole, ignoredRole, rejectedRole) + + // Wait for the broadcasters to finish. 
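// broadcasters is a sourcegraph/conc context pool: Wait blocks until every
// function passed to Go has returned, then yields their combined error, if
// any (hedged — see the conc/pool docs for the exact join semantics).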
+ err := broadcasters.Wait() + require.NoError(t, err) + time.Sleep(500 * time.Millisecond) + + // Assert that the messages were distributed as expected. + deadline := time.Now().Add(5 * time.Second) + interval := 100 * time.Millisecond + for i := 0; i < nodeCount; i++ { + // Better to lock inside the loop than to hold the lock across the wait interval. + mtx.Lock() + var errors []error + if roleBroadcasts[acceptedRole] != messageValidators[i].TotalAccepted { + errors = append(errors, fmt.Errorf("node %d accepted %d messages (expected %d)", i, messageValidators[i].TotalAccepted, roleBroadcasts[acceptedRole])) + } + if roleBroadcasts[ignoredRole] != messageValidators[i].TotalIgnored { + errors = append(errors, fmt.Errorf("node %d ignored %d messages (expected %d)", i, messageValidators[i].TotalIgnored, roleBroadcasts[ignoredRole])) + } + if roleBroadcasts[rejectedRole] != messageValidators[i].TotalRejected { + errors = append(errors, fmt.Errorf("node %d rejected %d messages (expected %d)", i, messageValidators[i].TotalRejected, roleBroadcasts[rejectedRole])) + } + mtx.Unlock() + if len(errors) == 0 { + break + } + if time.Now().After(deadline) { + require.Empty(t, errors) + } + time.Sleep(interval) + } + + // Assert that each node scores its peers according to the following order: + // - node 0, (node 1 OR 3), (node 1 OR 3), node 2 + // (after excluding itself from this list) + for _, node := range vNet.Nodes { + node := node + + // Prepare the valid orders, excluding the node itself. + validOrders := [][]NodeIndex{ + {0, 1, 3, 2}, + {0, 3, 1, 2}, + } + for i, validOrder := range validOrders { + for j, index := range validOrder { + if index == node.Index { + validOrders[i] = append(validOrders[i][:j], validOrders[i][j+1:]...) + break + } + } + } + + // Sort peers by their scores. + type peerScore struct { + index NodeIndex + score float64 + } + peers := make([]peerScore, 0, node.PeerScores.Len()) + node.PeerScores.Range(func(index NodeIndex, snapshot *pubsub.PeerScoreSnapshot) bool { + peers = append(peers, peerScore{index, snapshot.Score}) + return true + }) + sort.Slice(peers, func(i, j int) bool { + return peers[i].score > peers[j].score + }) + + // Print a pretty table of each node's peers and their scores. + defer func() { + tbl := table.New(os.Stdout) + tbl.SetHeaders("Peer", "Score", "Accepted", "Ignored", "Rejected") + mtx.Lock() + for _, peer := range peers { + tbl.AddRow( + fmt.Sprintf("%d", peer.index), + fmt.Sprintf("%.2f", peer.score), + fmt.Sprintf("%d", messageValidators[node.Index].Accepted[peer.index]), + fmt.Sprintf("%d", messageValidators[node.Index].Ignored[peer.index]), + fmt.Sprintf("%d", messageValidators[node.Index].Rejected[peer.index]), + ) + } + mtx.Unlock() + fmt.Println() + fmt.Printf("Peer Scores (Node %d)\n", node.Index) + tbl.Render() + }() + + // Assert that the peers are in one of the valid orders.
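// Two valid orders exist because node 1 (only ignored traffic, so a roughly
// neutral score) and node 3 (a mix whose positive accepted credit and
// negative rejected penalty partially offset) can land on either side of
// each other, while node 0 (best) and node 2 (worst) are unambiguous.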
+ require.Equal(t, len(vNet.Nodes)-1, len(peers), "node %d", node.Index) + for i, validOrder := range validOrders { + valid := true + for j, peer := range peers { + if peer.index != validOrder[j] { + valid = false + break + } + } + if valid { + break + } + if i == len(validOrders)-1 { + require.Fail(t, "invalid order", "node %d", node.Index) + } + } + } + defer fmt.Println() +} + +type MockMessageValidator struct { + Accepted []int + Ignored []int + Rejected []int + TotalAccepted int + TotalIgnored int + TotalRejected int + + ValidateFunc func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult +} + +func (v *MockMessageValidator) ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + return v.ValidatePubsubMessage +} + +func (v *MockMessageValidator) ValidatePubsubMessage(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + return v.ValidateFunc(ctx, p, pmsg) +} + +func (v *MockMessageValidator) ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, validation.Descriptor, error) { + panic("not implemented") // TODO: Implement +} + +type NodeIndex int + +type VirtualNode struct { + Index NodeIndex + Network *p2pNetwork + PeerScores *hashmap.Map[NodeIndex, *pubsub.PeerScoreSnapshot] +} + +func (n *VirtualNode) Broadcast(msg *spectypes.SSVMessage) error { + return n.Network.Broadcast(msg) +} + +// VirtualNet is a utility to create & interact with a virtual network of nodes. +type VirtualNet struct { + Nodes []*VirtualNode +} + +func CreateVirtualNet( + t *testing.T, + ctx context.Context, + nodes int, + validatorPubKeys []string, + messageValidatorProvider func(int) validation.MessageValidator, +) *VirtualNet { + var doneSetup atomic.Bool + vn := &VirtualNet{} + ln, routers, err := createNetworkAndSubscribe(t, ctx, LocalNetOptions{ + Nodes: nodes, + MinConnected: nodes - 1, + UseDiscv5: false, + TotalValidators: 1000, + ActiveValidators: 800, + MyValidators: 300, + MessageValidatorProvider: messageValidatorProvider, + PeerScoreInspector: func(selfPeer peer.ID, peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) { + if !doneSetup.Load() { + return + } + node := vn.NodeByPeerID(selfPeer) + if node == nil { + t.Fatalf("self peer not found (%s)", selfPeer) + } + + node.PeerScores.Range(func(index NodeIndex, snapshot *pubsub.PeerScoreSnapshot) bool { + node.PeerScores.Del(index) + return true + }) + for peerID, peerScore := range peerMap { + peerNode := vn.NodeByPeerID(peerID) + if peerNode == nil { + t.Fatalf("peer not found (%s)", peerID) + } + node.PeerScores.Set(peerNode.Index, peerScore) + } + + }, + PeerScoreInspectorInterval: time.Millisecond * 5, + }, validatorPubKeys...) 
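// The doneSetup gate in the inspector above matters: pubsub starts invoking
// the score inspector as soon as the nodes come up, i.e. before vn.Nodes is
// populated, so early snapshots are dropped instead of letting NodeByPeerID
// race against setup.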
+
+	require.NoError(t, err)
+	require.NotNil(t, routers)
+	require.NotNil(t, ln)
+
+	for i, node := range ln.Nodes {
+		vn.Nodes = append(vn.Nodes, &VirtualNode{
+			Index:      NodeIndex(i),
+			Network:    node.(*p2pNetwork),
+			PeerScores: hashmap.New[NodeIndex, *pubsub.PeerScoreSnapshot](),
+		})
+	}
+	doneSetup.Store(true)
+
+	return vn
+}
+
+func (vn *VirtualNet) NodeByPeerID(peerID peer.ID) *VirtualNode {
+	for _, node := range vn.Nodes {
+		if node.Network.Host().ID() == peerID {
+			return node
+		}
+	}
+	return nil
+}
+
+func (vn *VirtualNet) Close() error {
+	for _, node := range vn.Nodes {
+		err := node.Network.Close()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go
index 63706eba25..7244f22a18 100644
--- a/network/p2p/test_utils.go
+++ b/network/p2p/test_utils.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"time"
 
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"go.uber.org/zap"
@@ -14,6 +15,7 @@ import (
 	"github.com/bloxapp/ssv/message/validation"
 	"github.com/bloxapp/ssv/network"
 	"github.com/bloxapp/ssv/network/commons"
+	p2pcommons "github.com/bloxapp/ssv/network/commons"
 	"github.com/bloxapp/ssv/network/discovery"
 	"github.com/bloxapp/ssv/network/peers"
 	"github.com/bloxapp/ssv/network/peers/connections/mock"
@@ -71,9 +73,9 @@ func (ln *LocalNet) WithBootnode(ctx context.Context, logger *zap.Logger) error
 // CreateAndStartLocalNet creates a new local network and starts it.
 // If an error occurs while starting the local network, it retries
 // creating and starting it until pCtx is Done().
-func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, nodesQuantity, minConnected int, useDiscv5 bool) (*LocalNet, error) {
-	attempt := func(pCtx context.Context, nodesQuantity, minConnected int, useDiscv5 bool) (*LocalNet, error) {
-		ln, err := NewLocalNet(pCtx, logger, nodesQuantity, useDiscv5)
+func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, options LocalNetOptions) (*LocalNet, error) {
+	attempt := func(pCtx context.Context) (*LocalNet, error) {
+		ln, err := NewLocalNet(pCtx, logger, options)
 		if err != nil {
 			return nil, err
 		}
@@ -89,14 +91,14 @@ func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, nodesQuant
 			ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
 			defer cancel()
 			var peers []peer.ID
-			for len(peers) < minConnected && ctx.Err() == nil {
+			for len(peers) < options.MinConnected && ctx.Err() == nil {
 				peers = node.(HostProvider).Host().Network().Peers()
 				time.Sleep(time.Millisecond * 100)
 			}
 			if ctx.Err() != nil {
-				return fmt.Errorf("could not find enough peers for node %d, nodes quantity = %d, found = %d", i, nodesQuantity, len(peers))
+				return fmt.Errorf("could not find enough peers for node %d, nodes quantity = %d, found = %d", i, options.Nodes, len(peers))
 			}
-			logger.Debug("found enough peers", zap.Int("for node", i), zap.Int("nodesQuantity", nodesQuantity), zap.String("found", fmt.Sprintf("%+v", peers)))
+			logger.Debug("found enough peers", zap.Int("for node", i), zap.Int("nodesQuantity", options.Nodes), zap.String("found", fmt.Sprintf("%+v", peers)))
 			return nil
 		})
 	}
@@ -109,7 +111,7 @@ func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, nodesQuant
 		case <-pCtx.Done():
 			return nil, fmt.Errorf("context is done, network didn't start on time")
 		default:
-			ln, err := attempt(pCtx, nodesQuantity, 
minConnected, useDiscv5) + ln, err := attempt(pCtx) if err != nil { for _, node := range ln.Nodes { _ = node.Close() @@ -125,20 +127,48 @@ func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, nodesQuant } // NewTestP2pNetwork creates a new network.P2PNetwork instance -func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys, logger *zap.Logger, maxPeers int) (network.P2PNetwork, error) { +func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys testing.NodeKeys, logger *zap.Logger, options LocalNetOptions) (network.P2PNetwork, error) { operatorPubkey, err := rsaencryption.ExtractPublicKey(keys.OperatorKey) if err != nil { return nil, err } - cfg := NewNetConfig(keys, format.OperatorID([]byte(operatorPubkey)), ln.Bootnode, testing.RandomTCPPort(12001, 12999), ln.udpRand.Next(13001, 13999), maxPeers) + cfg := NewNetConfig(keys, format.OperatorID([]byte(operatorPubkey)), ln.Bootnode, testing.RandomTCPPort(12001, 12999), ln.udpRand.Next(13001, 13999), options.Nodes) cfg.Ctx = ctx cfg.Subnets = "00000000000000000000020000000000" //PAY ATTENTION for future test scenarios which use more than one eth-validator we need to make this field dynamically changing cfg.NodeStorage = mock.NodeStorage{ MockGetPrivateKey: keys.OperatorKey, RegisteredOperatorPublicKeyPEMs: []string{}, } + cfg.Metrics = nil cfg.MessageValidator = validation.NewMessageValidator(networkconfig.TestNetwork) cfg.Network = networkconfig.TestNetwork + if options.TotalValidators > 0 { + cfg.GetValidatorStats = func() (uint64, uint64, uint64, error) { + return uint64(options.TotalValidators), uint64(options.ActiveValidators), uint64(options.MyValidators), nil + } + } + + pubKey, err := p2pcommons.ECDSAPrivToInterface(keys.NetKey) + if err != nil { + panic(err) + } + selfPeerID, err := peer.IDFromPublicKey(pubKey.GetPublic()) + if err != nil { + panic(err) + } + + if options.MessageValidatorProvider != nil { + cfg.MessageValidator = options.MessageValidatorProvider(nodeIndex) + } else { + cfg.MessageValidator = validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithSelfAccept(selfPeerID, true)) + } + + if options.PeerScoreInspector != nil && options.PeerScoreInspectorInterval > 0 { + cfg.PeerScoreInspector = func(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) { + options.PeerScoreInspector(selfPeerID, peerMap) + } + cfg.PeerScoreInspectorInterval = options.PeerScoreInspectorInterval + } p := New(logger, cfg) err = p.Setup(logger) @@ -148,20 +178,28 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys return p, nil } +type LocalNetOptions struct { + MessageValidatorProvider func(int) validation.MessageValidator + Nodes int + MinConnected int + UseDiscv5 bool + TotalValidators, ActiveValidators, MyValidators int + PeerScoreInspector func(selfPeer peer.ID, peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) + PeerScoreInspectorInterval time.Duration +} + // NewLocalNet creates a new mdns network -func NewLocalNet(ctx context.Context, logger *zap.Logger, n int, useDiscv5 bool) (*LocalNet, error) { +func NewLocalNet(ctx context.Context, logger *zap.Logger, options LocalNetOptions) (*LocalNet, error) { ln := &LocalNet{} ln.udpRand = make(testing.UDPPortsRandomizer) - if useDiscv5 { + if options.UseDiscv5 { if err := ln.WithBootnode(ctx, logger); err != nil { return nil, err } } - i := 0 - nodes, keys, err := testing.NewLocalTestnet(ctx, n, func(pctx context.Context, keys testing.NodeKeys) network.P2PNetwork { - i++ - logger := 
logger.Named(fmt.Sprintf("node-%d", i)) - p, err := ln.NewTestP2pNetwork(pctx, keys, logger, n) + nodes, keys, err := testing.NewLocalTestnet(ctx, options.Nodes, func(pctx context.Context, nodeIndex int, keys testing.NodeKeys) network.P2PNetwork { + logger := logger.Named(fmt.Sprintf("node-%d", nodeIndex)) + p, err := ln.NewTestP2pNetwork(pctx, nodeIndex, keys, logger, options) if err != nil { logger.Error("could not setup network", zap.Error(err)) } @@ -196,6 +234,7 @@ func NewNetConfig(keys testing.NodeKeys, operatorPubKeyHash string, bn *discover MaxBatchResponse: 25, MaxPeers: maxPeers, PubSubTrace: false, + PubSubScoring: true, NetworkPrivateKey: keys.NetKey, OperatorPrivateKey: keys.OperatorKey, OperatorPubKeyHash: operatorPubKeyHash, diff --git a/network/peers/connections/mock/mock_storage.go b/network/peers/connections/mock/mock_storage.go index dc85870cad..a6944eb4b9 100644 --- a/network/peers/connections/mock/mock_storage.go +++ b/network/peers/connections/mock/mock_storage.go @@ -89,7 +89,7 @@ func (m NodeStorage) DeleteOperatorData(txn basedb.ReadWriter, id spectypes.Oper func (m NodeStorage) ListOperators(txn basedb.Reader, from uint64, to uint64) ([]registrystorage.OperatorData, error) { //TODO implement me - panic("implement me") + return nil, errors.New("empty") } func (m NodeStorage) GetOperatorsPrefix() []byte { diff --git a/network/testing/local.go b/network/testing/local.go index d3610dddae..2c36b970d7 100644 --- a/network/testing/local.go +++ b/network/testing/local.go @@ -7,7 +7,7 @@ import ( ) // NetworkFactory is a generic factory for network instances -type NetworkFactory func(pctx context.Context, keys NodeKeys) network.P2PNetwork +type NetworkFactory func(pctx context.Context, nodeIndex int, keys NodeKeys) network.P2PNetwork // NewLocalTestnet creates a new local network func NewLocalTestnet(ctx context.Context, n int, factory NetworkFactory) ([]network.P2PNetwork, []NodeKeys, error) { @@ -18,7 +18,7 @@ func NewLocalTestnet(ctx context.Context, n int, factory NetworkFactory) ([]netw } for i, k := range keys { - nodes[i] = factory(ctx, k) + nodes[i] = factory(ctx, i, k) } return nodes, keys, nil diff --git a/network/topics/params/helpers.go b/network/topics/params/helpers.go index 45e468ab63..cbbd47f502 100644 --- a/network/topics/params/helpers.go +++ b/network/topics/params/helpers.go @@ -7,10 +7,9 @@ import ( "github.com/pkg/errors" ) -// scoreByWeight provides the relevant score by the provided weight and threshold. -func scoreByWeight(maxScore float64, weight, threshold float64) float64 { - return maxScore / (weight * threshold * threshold) -} +const ( + oneEpochDuration = (12 * time.Second) * 32 +) // scoreDecay determines the decay rate from the provided time period till // the decayToZero value. Ex: ( 1 -> 0.01) @@ -19,11 +18,6 @@ func scoreDecay(totalDecayDuration time.Duration, decayIntervalDuration time.Dur return math.Pow(decayToZero, 1/ticks) } -// the cap for `inMesh` time scoring. -func inMeshCap(inMeshTime time.Duration) float64 { - return float64((3600 * time.Second) / inMeshTime) -} - // decayThreshold is used to determine the threshold from the decay limit with // a provided growth rate. This applies the decay rate to a // computed limit. 
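
Note on the scoring math: scoreDecay above derives the per-interval multiplier
that takes a counter from 1 down to decayToZero over a given period, and
decayConvergence / decayThreshold (whose bodies are not shown in these hunks)
compute the value such a counter converges to when it grows at a constant rate.
Below is a minimal, runnable Go sketch of how peer_score.go uses this for the
P7 behaviour penalty; it is not part of the patch, and the convergence formula
rate/(1-decay) is an assumption based on the conventional gossipsub derivation:

	package main

	import (
		"fmt"
		"math"
		"time"
	)

	const (
		decayToZero     = 0.01
		oneEpoch        = 32 * 12 * time.Second // 32 slots of 12s
		gossipThreshold = -4000.0
	)

	// scoreDecay mirrors helpers.go: the per-interval multiplier that decays
	// a counter from 1 to decayToZero over totalDecayDuration.
	func scoreDecay(total, interval time.Duration) float64 {
		ticks := float64(total / interval)
		return math.Pow(decayToZero, 1/ticks)
	}

	// decayConvergence returns the limit of a counter that is incremented by
	// rate once per interval and multiplied by decay in between (assumed form).
	func decayConvergence(decay, rate float64) float64 {
		return rate / (1 - decay)
	}

	func main() {
		// P7 as computed in PeerScoreParams below: up to 10 penalty events per
		// epoch are tolerated; the excess over the threshold (6) is squared and
		// weighted so that sustained misbehaviour approaches the gossip threshold.
		decay := scoreDecay(10*oneEpoch, oneEpoch)
		target := decayConvergence(decay, 10.0) - 6
		weight := gossipThreshold / (target * target)
		fmt.Printf("decay=%.4f target=%.2f weight=%.4f\n", decay, target, weight)
	}
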
diff --git a/network/topics/params/peer_score.go b/network/topics/params/peer_score.go index 91492ccb1b..5ff309aac2 100644 --- a/network/topics/params/peer_score.go +++ b/network/topics/params/peer_score.go @@ -9,62 +9,77 @@ import ( ) const ( - gossipThreshold = -4000 - defaultIPColocationThreshold = 10 // TODO: check a lower value such as in ETH (3) + // Thresholds + gossipThreshold = -4000 + publishThreshold = -8000 + graylistThreshold = -16000 + acceptPXThreshold = 100 + opportunisticGraftThreshold = 5 + + // Overall parameters + topicScoreCap = 32.72 + decayInterval = 32 * (time.Second * 12) // One epoch + decayToZero = 0.01 + retainScore = 38400 + + // P5 + appSpecificWeight = 0 + + // P6 + ipColocationFactorThreshold = 10 + ipColocationFactorWeight = -topicScoreCap + + // P7 + behaviourPenaltyThreshold = 6 ) // PeerScoreThresholds returns the thresholds to use for peer scoring func PeerScoreThresholds() *pubsub.PeerScoreThresholds { return &pubsub.PeerScoreThresholds{ GossipThreshold: gossipThreshold, - PublishThreshold: -8000, - GraylistThreshold: -16000, - AcceptPXThreshold: 100, - OpportunisticGraftThreshold: 5, + PublishThreshold: publishThreshold, + GraylistThreshold: graylistThreshold, + AcceptPXThreshold: acceptPXThreshold, + OpportunisticGraftThreshold: opportunisticGraftThreshold, } } // PeerScoreParams returns peer score params according to the given options -func PeerScoreParams(oneEpoch, msgIDCacheTTL time.Duration, ipColocationWeight float64, ipColocationThreshold int, ipWhilelist ...*net.IPNet) *pubsub.PeerScoreParams { +func PeerScoreParams(oneEpoch, msgIDCacheTTL time.Duration, ipWhilelist ...*net.IPNet) *pubsub.PeerScoreParams { if oneEpoch == 0 { oneEpoch = oneEpochDuration } - maxPositiveScore := (maxInMeshScore + maxFirstDeliveryScore) * (subnetTopicsWeight) - topicScoreCap := maxPositiveScore / 4.0 // ETH divides by 2, we use lower value to reduce cap - - behaviourPenaltyThreshold := 10.0 // using a larger threshold than ETH (6) to reduce the effect of behavioural penalty - behaviourPenaltyDecay := scoreDecay(oneEpoch*10, oneEpoch) - // TODO: rate (10.0) should be injected to this function - targetVal, _ := decayConvergence(behaviourPenaltyDecay, 10.0/slotsPerEpoch) + // P7 calculation + behaviourPenaltyDecay := scoreDecay(oneEpoch*10, decayInterval) + maxAllowedRatePerDecayInterval := 10.0 + targetVal, _ := decayConvergence(behaviourPenaltyDecay, maxAllowedRatePerDecayInterval) targetVal = targetVal - behaviourPenaltyThreshold behaviourPenaltyWeight := gossipThreshold / (targetVal * targetVal) - if ipColocationWeight == 0 { - ipColocationWeight = -topicScoreCap - } - if ipColocationThreshold == 0 { - ipColocationThreshold = defaultIPColocationThreshold - } return &pubsub.PeerScoreParams{ - Topics: make(map[string]*pubsub.TopicScoreParams), + Topics: make(map[string]*pubsub.TopicScoreParams), + // Overall parameters TopicScoreCap: topicScoreCap, + DecayInterval: decayInterval, + DecayToZero: decayToZero, + RetainScore: retainScore, + SeenMsgTTL: msgIDCacheTTL, + + // P5 AppSpecificScore: func(p peer.ID) float64 { - // TODO: implement return 0 }, - AppSpecificWeight: 1, - IPColocationFactorWeight: ipColocationWeight, - IPColocationFactorThreshold: ipColocationThreshold, + AppSpecificWeight: appSpecificWeight, + + // P6 + IPColocationFactorWeight: ipColocationFactorWeight, + IPColocationFactorThreshold: ipColocationFactorThreshold, IPColocationFactorWhitelist: ipWhilelist, - SeenMsgTTL: msgIDCacheTTL, - BehaviourPenaltyWeight: behaviourPenaltyWeight, - 
BehaviourPenaltyThreshold: behaviourPenaltyThreshold, - BehaviourPenaltyDecay: behaviourPenaltyDecay, - DecayInterval: oneEpoch, - DecayToZero: decayToZero, - // RetainScore is the time to remember counters for a disconnected peer - // TODO: ETH uses 100 epoch, we reduced it to 10 until scoring will be more mature - RetainScore: oneEpoch * 10, + + // P7 + BehaviourPenaltyWeight: behaviourPenaltyWeight, + BehaviourPenaltyThreshold: behaviourPenaltyThreshold, + BehaviourPenaltyDecay: behaviourPenaltyDecay, } } diff --git a/network/topics/params/scores_test.go b/network/topics/params/scores_test.go index 003dc49792..04ba25895f 100644 --- a/network/topics/params/scores_test.go +++ b/network/topics/params/scores_test.go @@ -63,7 +63,7 @@ func TestTopicScoreParams(t *testing.T) { } func TestPeerScoreParams(t *testing.T) { - peerScoreParams := PeerScoreParams(oneEpochDuration, 550*(time.Millisecond*700), 0, 0) + peerScoreParams := PeerScoreParams(oneEpochDuration, 550*(time.Millisecond*700)) raw, err := peerScoreParamsString(peerScoreParams) require.NoError(t, err) require.NotNil(t, raw) diff --git a/network/topics/params/topic_score.go b/network/topics/params/topic_score.go index a7b0942f34..2389cebefc 100644 --- a/network/topics/params/topic_score.go +++ b/network/topics/params/topic_score.go @@ -9,25 +9,30 @@ import ( ) const ( - gossipSubD = 8 - oneEpochDuration = (12 * time.Second) * 32 - slotsPerEpoch = 32 - // maxInMeshScore describes the max score a peer can attain from being in the mesh - maxInMeshScore = 10 - // maxFirstDeliveryScore describes the max score a peer can obtain from first deliveries - maxFirstDeliveryScore = 40 - // decayToZero specifies the terminal value that we will use when decaying a value. - decayToZero = 0.01 - // dampeningFactor reduces the amount by which the various thresholds and caps are created. 
- // using value of 50 (prysm changed to 90) - dampeningFactor = 50 - - subnetTopicsWeight = 4.0 - invalidMeshDeliveriesWeight = -800 -) - -const ( + // Network Topology + gossipSubD = 8 minActiveValidators = 200 + + // Overall parameters + totalTopicsWeight = 4.0 + + // P1 + maxTimeInMeshScore = 10 // max score a peer can attain from being in the mesh + timeInMeshQuantum = 12 + timeInMeshQuantumCap = 3600 + + // P2 + firstDeliveryDecayEpochs = time.Duration(4) + maxFirstDeliveryScore = 80 // max score a peer can obtain from first deliveries + + // P3 + meshDeliveryDecayEpochs = time.Duration(16) + meshDeliveryDampeningFactor = 1.0 / 50.0 + meshDeliveryCapFactor = 16 + + // P4 + invalidMessageDecayEpochs = time.Duration(100) + maxInvalidMessagesAllowed = 20 ) var ( @@ -42,8 +47,6 @@ type NetworkOpts struct { ActiveValidators int // Subnets is the number of subnets in the network Subnets int - //// Groups is the amount of groups used in the network - // Groups int // OneEpochDuration is used as a time-frame length to control scoring in a dynamic way OneEpochDuration time.Duration // TotalTopicsWeight is the weight of all the topics in the network @@ -52,17 +55,33 @@ type NetworkOpts struct { // TopicOpts is the config struct for topic configurations type TopicOpts struct { - // TopicWeight is the weight of the topic - TopicWeight float64 - // ExpectedMsgRate is the expected rate for the topic - ExpectedMsgRate float64 - InvalidMsgDecayTime time.Duration - FirstMsgDecayTime time.Duration - MeshMsgDecayTime time.Duration - MeshMsgCapFactor float64 - MeshMsgActivationTime time.Duration // D is the gossip degree D int + + // ExpectedMsgRate is the expected rate for the topic + ExpectedMsgRate float64 + + // TopicWeight is the weight of the topic + TopicWeight float64 + + // P1 + MaxTimeInMeshScore float64 + TimeInMeshQuantum int + TimeInMeshQuantumCap int + + // P2 + FirstDeliveryDecayEpochs time.Duration + MaxFirstDeliveryScore float64 + + // P3 + MeshDeliveryDecayEpochs time.Duration + MeshDeliveryDampeningFactor float64 + MeshDeliveryCapFactor float64 + MeshDeliveryActivationTime time.Duration + + // P4 + InvalidMessageDecayEpochs time.Duration + MaxInvalidMessagesAllowed int } // Options is the struct used for creating topic score params @@ -72,15 +91,54 @@ type Options struct { } func (o *Options) defaults() { + // Network if o.Network.OneEpochDuration == 0 { o.Network.OneEpochDuration = oneEpochDuration } if o.Network.TotalTopicsWeight == 0 { - o.Network.TotalTopicsWeight = subnetTopicsWeight // + ... 
+ o.Network.TotalTopicsWeight = totalTopicsWeight } + // Topic if o.Topic.D == 0 { o.Topic.D = gossipSubD } + // Topic - P1 + if o.Topic.MaxTimeInMeshScore == 0 { + o.Topic.MaxTimeInMeshScore = maxTimeInMeshScore + } + if o.Topic.TimeInMeshQuantum == 0 { + o.Topic.TimeInMeshQuantum = timeInMeshQuantum + } + if o.Topic.TimeInMeshQuantumCap == 0 { + o.Topic.TimeInMeshQuantumCap = timeInMeshQuantumCap + } + // Topic - P2 + if o.Topic.FirstDeliveryDecayEpochs == 0 { + o.Topic.FirstDeliveryDecayEpochs = firstDeliveryDecayEpochs + } + if o.Topic.MaxFirstDeliveryScore == 0 { + o.Topic.MaxFirstDeliveryScore = maxFirstDeliveryScore + } + // Topic - P3 + if o.Topic.MeshDeliveryDecayEpochs == 0 { + o.Topic.MeshDeliveryDecayEpochs = meshDeliveryDecayEpochs + } + if o.Topic.MeshDeliveryDampeningFactor == 0 { + o.Topic.MeshDeliveryDampeningFactor = meshDeliveryDampeningFactor + } + if o.Topic.MeshDeliveryCapFactor == 0 { + o.Topic.MeshDeliveryCapFactor = meshDeliveryCapFactor + } + if o.Topic.MeshDeliveryActivationTime == 0 { + o.Topic.MeshDeliveryActivationTime = o.Network.OneEpochDuration * 3 + } + // Topic - P4 + if o.Topic.InvalidMessageDecayEpochs == 0 { + o.Topic.InvalidMessageDecayEpochs = invalidMessageDecayEpochs + } + if o.Topic.MaxInvalidMessagesAllowed == 0 { + o.Topic.MaxInvalidMessagesAllowed = maxInvalidMessagesAllowed + } } func (o *Options) validate() error { @@ -92,10 +150,10 @@ func (o *Options) validate() error { // maxScore attainable by a peer func (o *Options) maxScore() float64 { - return (maxInMeshScore + maxFirstDeliveryScore) * o.Network.TotalTopicsWeight + return (o.Topic.MaxTimeInMeshScore + o.Topic.MaxFirstDeliveryScore) * o.Network.TotalTopicsWeight } -// NewOpts creates new TopicOpts instance with defaults +// NewOpts creates new TopicOpts instance func NewOpts(activeValidators, subnets int) Options { return Options{ Network: NetworkOpts{ @@ -108,70 +166,88 @@ func NewOpts(activeValidators, subnets int) Options { // NewSubnetTopicOpts creates new TopicOpts for a subnet topic func NewSubnetTopicOpts(activeValidators, subnets int) Options { + + // Create options with default values opts := NewOpts(activeValidators, subnets) opts.defaults() - opts.Topic.TopicWeight = subnetTopicsWeight / float64(opts.Network.Subnets) + + // Set topic weight with equal weights + opts.Topic.TopicWeight = opts.Network.TotalTopicsWeight / float64(opts.Network.Subnets) + + // Set expected message rate based on stage metrics validatorsPerSubnet := float64(opts.Network.ActiveValidators) / float64(opts.Network.Subnets) - valMsgsPerEpoch := 9.0 - opts.Topic.ExpectedMsgRate = validatorsPerSubnet * valMsgsPerEpoch / float64(slotsPerEpoch) - opts.Topic.FirstMsgDecayTime = time.Duration(8) - opts.Topic.MeshMsgDecayTime = time.Duration(16) - opts.Topic.MeshMsgCapFactor = 16.0 // using a large factor until we have more accurate values - opts.Topic.MeshMsgActivationTime = opts.Network.OneEpochDuration + msgsPerValidatorPerSecond := 600.0 / 10000.0 + opts.Topic.ExpectedMsgRate = validatorsPerSubnet * msgsPerValidatorPerSecond + return opts } // TopicParams creates pubsub.TopicScoreParams from the given TopicOpts -// implementation is based on ETH2.0 and prysm as a reference, with alignments to ssv: +// implementation is based on ETH2.0, with alignments to ssv: // https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c func TopicParams(opts Options) (*pubsub.TopicScoreParams, error) { + // Validate options if err := opts.validate(); err != nil { return nil, err } + + // Set to default if not set 
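+	// (The caps and thresholds computed below are decay-convergence points of
+	// counters that grow at the topic's expected message rate; see the sketch
+	// following the helpers.go diff above.)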
opts.defaults() - oneSlot := opts.Network.OneEpochDuration / 32.0 - inMeshTime := oneSlot + expectedMessagesPerDecayInterval := opts.Topic.ExpectedMsgRate * decayInterval.Seconds() + + // P1 + timeInMeshCap := float64(opts.Topic.TimeInMeshQuantumCap) / float64(opts.Topic.TimeInMeshQuantum) + + // P2 + firstMessageDeliveriesDecay := scoreDecay(opts.Network.OneEpochDuration*opts.Topic.FirstDeliveryDecayEpochs, decayInterval) + firstMessageDeliveriesCap, err := decayConvergence(firstMessageDeliveriesDecay, 2*(expectedMessagesPerDecayInterval)/float64(opts.Topic.D)) + if err != nil { + return nil, errors.Wrap(err, "could not calculate decay convergence for first message delivery cap") + } + + // P3 + meshMessageDeliveriesDecay := scoreDecay(opts.Network.OneEpochDuration*opts.Topic.MeshDeliveryDecayEpochs, decayInterval) + meshMessageDeliveriesThreshold, err := decayThreshold(meshMessageDeliveriesDecay, (expectedMessagesPerDecayInterval * opts.Topic.MeshDeliveryDampeningFactor)) + if err != nil { + return nil, errors.Wrap(err, "could not calculate threshold for mesh message deliveries threshold") + } + meshMessageDeliveriesWeight := -(opts.maxScore() / (opts.Topic.TopicWeight * math.Pow(meshMessageDeliveriesThreshold, 2))) + MeshMessageDeliveriesCap := meshMessageDeliveriesThreshold * opts.Topic.MeshDeliveryCapFactor + + // P4 + invalidMessageDeliveriesDecay := scoreDecay(opts.Topic.InvalidMessageDecayEpochs*opts.Network.OneEpochDuration, decayInterval) + invalidMessageDeliveriesWeight := graylistThreshold / (opts.Topic.TopicWeight * float64(opts.Topic.MaxInvalidMessagesAllowed) * float64(opts.Topic.MaxInvalidMessagesAllowed)) params := &pubsub.TopicScoreParams{ - TopicWeight: opts.Topic.TopicWeight, - TimeInMeshWeight: maxInMeshScore / inMeshCap(inMeshTime), - TimeInMeshQuantum: inMeshTime, - TimeInMeshCap: inMeshCap(inMeshTime), - } - - if opts.Topic.FirstMsgDecayTime > 0 { - params.FirstMessageDeliveriesDecay = scoreDecay(opts.Topic.FirstMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) - firstMsgDeliveryCap, err := decayConvergence(params.FirstMessageDeliveriesDecay, 2*opts.Topic.ExpectedMsgRate/float64(opts.Topic.D)) - if err != nil { - return nil, errors.Wrap(err, "could not calculate first msg delivery cap") - } - params.FirstMessageDeliveriesCap = firstMsgDeliveryCap - params.FirstMessageDeliveriesWeight = maxFirstDeliveryScore / firstMsgDeliveryCap - } - - if opts.Topic.MeshMsgDecayTime > 0 { - params.MeshMessageDeliveriesDecay = scoreDecay(opts.Topic.MeshMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) - // a peer must send us at least 1/50 of the regular messages in time, very conservative limit - meshMsgDeliveriesThreshold, err := decayThreshold(params.MeshMessageDeliveriesDecay, math.Min(2.0, opts.Topic.ExpectedMsgRate/dampeningFactor)) - if err != nil { - return nil, errors.Wrap(err, "could not calculate mesh message deliveries threshold") - } - params.MeshMessageDeliveriesThreshold = meshMsgDeliveriesThreshold - params.MeshMessageDeliveriesCap = opts.Topic.MeshMsgCapFactor * meshMsgDeliveriesThreshold - params.MeshMessageDeliveriesWeight = -scoreByWeight(opts.maxScore(), opts.Topic.TopicWeight, - math.Max(4.0, params.MeshMessageDeliveriesCap)) // used cap instead of threshold to reduce weight - params.MeshMessageDeliveriesActivation = opts.Topic.MeshMsgActivationTime - params.MeshMessageDeliveriesWindow = 2 * time.Second - params.MeshFailurePenaltyWeight = params.MeshMessageDeliveriesWeight - params.MeshFailurePenaltyDecay = 
params.MeshMessageDeliveriesDecay - } - - if opts.Topic.InvalidMsgDecayTime > 0 { - params.InvalidMessageDeliveriesWeight = invalidMeshDeliveriesWeight - params.InvalidMessageDeliveriesDecay = scoreDecay(opts.Topic.InvalidMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) - } else { - params.InvalidMessageDeliveriesDecay = 0.1 + // Topic-specific parameters + TopicWeight: opts.Topic.TopicWeight, + + // P1 + TimeInMeshQuantum: time.Duration(opts.Topic.TimeInMeshQuantum) * time.Second, + TimeInMeshCap: timeInMeshCap, + TimeInMeshWeight: opts.Topic.MaxTimeInMeshScore / timeInMeshCap, + + // P2 + FirstMessageDeliveriesDecay: firstMessageDeliveriesDecay, + FirstMessageDeliveriesCap: firstMessageDeliveriesCap, + FirstMessageDeliveriesWeight: opts.Topic.MaxFirstDeliveryScore / firstMessageDeliveriesCap, + + // P3 + MeshMessageDeliveriesDecay: meshMessageDeliveriesDecay, + MeshMessageDeliveriesThreshold: meshMessageDeliveriesThreshold, + MeshMessageDeliveriesWeight: meshMessageDeliveriesWeight, + MeshMessageDeliveriesCap: MeshMessageDeliveriesCap, + MeshMessageDeliveriesActivation: opts.Topic.MeshDeliveryActivationTime, + MeshMessageDeliveriesWindow: 2 * time.Second, + + // P3b + MeshFailurePenaltyDecay: meshMessageDeliveriesDecay, + MeshFailurePenaltyWeight: meshMessageDeliveriesWeight, + + // P4 + InvalidMessageDeliveriesDecay: invalidMessageDeliveriesDecay, + InvalidMessageDeliveriesWeight: invalidMessageDeliveriesWeight, } return params, nil diff --git a/network/topics/pubsub.go b/network/topics/pubsub.go index 2422422e2b..155b6968b4 100644 --- a/network/topics/pubsub.go +++ b/network/topics/pubsub.go @@ -34,7 +34,7 @@ const ( // validateThrottle is the amount of goroutines used for pubsub msg validation validateThrottle = 8192 // scoreInspectInterval is the interval for performing score inspect, which goes over all peers scores - defaultScoreInspectInterval = time.Minute + defaultScoreInspectInterval = 5 * time.Minute // msgIDCacheTTL specifies how long a message ID will be remembered as seen, 6.4m (as ETH 2.0) msgIDCacheTTL = params.HeartbeatInterval * 550 ) @@ -149,7 +149,7 @@ func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig) (*pub inspectInterval = defaultScoreInspectInterval } - peerScoreParams := params.PeerScoreParams(cfg.Scoring.OneEpochDuration, cfg.MsgIDCacheTTL, cfg.Scoring.IPColocationWeight, 0, cfg.Scoring.IPWhilelist...) + peerScoreParams := params.PeerScoreParams(cfg.Scoring.OneEpochDuration, cfg.MsgIDCacheTTL, cfg.Scoring.IPWhilelist...) 
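+	// (IP colocation weight and threshold are now fixed constants in the
+	// params package, so they are no longer passed in here.)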
psOpts = append(psOpts, pubsub.WithPeerScore(peerScoreParams, params.PeerScoreThresholds()), pubsub.WithPeerScoreInspect(inspector, inspectInterval)) async.Interval(ctx, time.Hour, func() { diff --git a/network/topics/scoring.go b/network/topics/scoring.go index 9e47514262..b99968df4a 100644 --- a/network/topics/scoring.go +++ b/network/topics/scoring.go @@ -27,20 +27,22 @@ func DefaultScoringConfig() *ScoringConfig { func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex) pubsub.ExtendedPeerScoreInspectFn { return func(scores map[peer.ID]*pubsub.PeerScoreSnapshot) { for pid, peerScores := range scores { - // scores := []*peers.NodeScore{ - // { - // Name: "PS_Score", - // Value: peerScores.Score, - // }, { - // Name: "PS_BehaviourPenalty", - // Value: peerScores.BehaviourPenalty, - // }, { - // Name: "PS_IPColocationFactor", - // Value: peerScores.IPColocationFactor, - // }, - //} + + //filter all topics that have InvalidMessageDeliveries > 0 + filtered := make(map[string]*pubsub.TopicScoreSnapshot) + for topic, snapshot := range peerScores.Topics { + if snapshot.InvalidMessageDeliveries > 0 { + filtered[topic] = snapshot + } + } + // log peer overall score and topics scores logger.Debug("peer scores", fields.PeerID(pid), - zap.Any("peerScores", peerScores)) + fields.PeerScore(peerScores.Score), + zap.Any("invalid_messages", filtered), + zap.Float64("ip_colocation", peerScores.IPColocationFactor), + zap.Float64("behaviour_penalty", peerScores.BehaviourPenalty), + zap.Float64("app_specific_penalty", peerScores.AppSpecificScore)) + metricPubsubPeerScoreInspect.WithLabelValues(pid.String()).Set(peerScores.Score) // err := scoreIdx.Score(pid, scores...) // if err != nil { From 49793e62c02099b70283e537dd77306b163ea497 Mon Sep 17 00:00:00 2001 From: rehs0y Date: Wed, 22 Nov 2023 00:55:11 +0200 Subject: [PATCH 47/54] scoring improvements (#1210) * dd peerid to msg validation logs * add more verbose score logs * increase peer balance interval * disable mesh deliveries weight * increase retainScore to 100 epochs * comment * fix lint * less logs * comment --------- Co-authored-by: moshe-blox --- message/validation/validation.go | 8 +++++--- network/p2p/p2p.go | 2 +- network/topics/params/peer_score.go | 2 +- network/topics/params/topic_score.go | 7 ++++++- network/topics/scoring.go | 30 ++++++++++++++++++++++++---- 5 files changed, 39 insertions(+), 10 deletions(-) diff --git a/message/validation/validation.go b/message/validation/validation.go index dfaa93c6f9..2a363efe25 100644 --- a/message/validation/validation.go +++ b/message/validation/validation.go @@ -243,12 +243,14 @@ func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, peerID peer round = descriptor.Consensus.Round } + f := append(descriptor.Fields(), fields.PeerID(peerID)) + if err != nil { var valErr Error if errors.As(err, &valErr) { if valErr.Reject() { if !valErr.Silent() { - f := append(descriptor.Fields(), zap.Error(err)) + f = append(f, zap.Error(err)) mv.logger.Debug("rejecting invalid message", f...) } @@ -257,7 +259,7 @@ func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, peerID peer } if !valErr.Silent() { - f := append(descriptor.Fields(), zap.Error(err)) + f = append(f, zap.Error(err)) mv.logger.Debug("ignoring invalid message", f...) 
 			}
 			mv.metrics.MessageIgnored(valErr.Text(), descriptor.Role, round)
@@ -265,7 +267,7 @@ func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, peerID peer
 		}
 
 		mv.metrics.MessageIgnored(err.Error(), descriptor.Role, round)
-		f := append(descriptor.Fields(), zap.Error(err))
+		f = append(f, zap.Error(err))
 		mv.logger.Debug("ignoring invalid message", f...)
 		return pubsub.ValidationIgnore
 	}
diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go
index e959fab776..96c8d0771e 100644
--- a/network/p2p/p2p.go
+++ b/network/p2p/p2p.go
@@ -39,7 +39,7 @@ const (
 )
 
 const (
-	connManagerGCInterval           = time.Minute
+	connManagerGCInterval           = 3 * time.Minute
 	connManagerGCTimeout            = time.Minute
 	peersReportingInterval          = 60 * time.Second
 	peerIdentitiesReportingInterval = 5 * time.Minute
diff --git a/network/topics/params/peer_score.go b/network/topics/params/peer_score.go
index 5ff309aac2..a1af18d988 100644
--- a/network/topics/params/peer_score.go
+++ b/network/topics/params/peer_score.go
@@ -20,7 +20,7 @@ const (
 	topicScoreCap = 32.72
 	decayInterval = 32 * (time.Second * 12) // One epoch
 	decayToZero   = 0.01
-	retainScore   = 38400
+	retainScore   = 100 * 32 * 12 * time.Second // 100 epochs
 
 	// P5
 	appSpecificWeight = 0
diff --git a/network/topics/params/topic_score.go b/network/topics/params/topic_score.go
index 2389cebefc..6bab61eacc 100644
--- a/network/topics/params/topic_score.go
+++ b/network/topics/params/topic_score.go
@@ -26,9 +26,11 @@ const (
 	maxFirstDeliveryScore    = 80 // max score a peer can obtain from first deliveries
 
 	// P3
+	// Mesh scoring is disabled for now.
 	meshDeliveryDecayEpochs     = time.Duration(16)
 	meshDeliveryDampeningFactor = 1.0 / 50.0
 	meshDeliveryCapFactor       = 16
+	meshScoringEnabled          = false
 
 	// P4
 	invalidMessageDecayEpochs = time.Duration(100)
@@ -212,7 +214,10 @@ func TopicParams(opts Options) (*pubsub.TopicScoreParams, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "could not calculate threshold for mesh message deliveries threshold")
 	}
-	meshMessageDeliveriesWeight := -(opts.maxScore() / (opts.Topic.TopicWeight * math.Pow(meshMessageDeliveriesThreshold, 2)))
+	var meshMessageDeliveriesWeight float64
+	if meshScoringEnabled {
+		meshMessageDeliveriesWeight = -(opts.maxScore() / (opts.Topic.TopicWeight * math.Pow(meshMessageDeliveriesThreshold, 2)))
+	}
 	MeshMessageDeliveriesCap := meshMessageDeliveriesThreshold * opts.Topic.MeshDeliveryCapFactor
 
 	// P4
diff --git a/network/topics/scoring.go b/network/topics/scoring.go
index b99968df4a..7dd0ac7064 100644
--- a/network/topics/scoring.go
+++ b/network/topics/scoring.go
@@ -1,6 +1,7 @@
 package topics
 
 import (
+	"math"
 	"time"
 
 	"github.com/bloxapp/ssv/logging/fields"
@@ -30,18 +31,40 @@ func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex) pubsub.Extend
 
 		//filter all topics that have InvalidMessageDeliveries > 0
 		filtered := make(map[string]*pubsub.TopicScoreSnapshot)
+		var totalInvalidMessages float64
+		var totalLowMeshDeliveries int
 		for topic, snapshot := range peerScores.Topics {
-			if snapshot.InvalidMessageDeliveries > 0 {
+			if snapshot.InvalidMessageDeliveries != 0 {
 				filtered[topic] = snapshot
 			}
+			if snapshot.InvalidMessageDeliveries > 0 {
+				totalInvalidMessages += math.Sqrt(snapshot.InvalidMessageDeliveries)
+			}
+			// 107 is a heuristic low-deliveries mark; the count is only logged below.
+			if snapshot.MeshMessageDeliveries < 107 {
+				totalLowMeshDeliveries++
+			}
 		}
-
-		// log peer overall score and topics scores
-		logger.Debug("peer scores", fields.PeerID(pid),
+
+		fields := []zap.Field{
+			fields.PeerID(pid),
 			fields.PeerScore(peerScores.Score),
 			zap.Any("invalid_messages", filtered),
 			zap.Float64("ip_colocation", 
peerScores.IPColocationFactor), zap.Float64("behaviour_penalty", peerScores.BehaviourPenalty), - zap.Float64("app_specific_penalty", peerScores.AppSpecificScore)) + zap.Float64("app_specific_penalty", peerScores.AppSpecificScore), + zap.Float64("total_low_mesh_deliveries", float64(totalLowMeshDeliveries)), + zap.Float64("total_invalid_messages", totalInvalidMessages), + zap.Any("invalid_messages", filtered), + } + + // log if peer score is below threshold + if peerScores.Score < -1000 { + fields = append(fields, zap.Bool("low_score", true)) + } + + // log peer overall score and topics scores + logger.Debug("peer scores", fields...) metricPubsubPeerScoreInspect.WithLabelValues(pid.String()).Set(peerScores.Score) // err := scoreIdx.Score(pid, scores...) From 064df8ec513e53a7327ca8e161ddf35e7d22f91e Mon Sep 17 00:00:00 2001 From: rehs0y Date: Wed, 22 Nov 2023 12:01:36 +0200 Subject: [PATCH 48/54] update permissionless fork epochs (#1211) * set fork epochs and date two days forward * set mainnet fork to be in the far future. * set epoch to maxuint * maxuint --------- Co-authored-by: moshe-blox --- networkconfig/holesky.go | 2 +- networkconfig/jato-v2.go | 2 +- networkconfig/mainnet.go | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/networkconfig/holesky.go b/networkconfig/holesky.go index 4b1f36a9a7..ce08531948 100644 --- a/networkconfig/holesky.go +++ b/networkconfig/holesky.go @@ -19,5 +19,5 @@ var Holesky = NetworkConfig{ "enr:-Li4QFIQzamdvTxGJhvcXG_DFmCeyggSffDnllY5DiU47pd_K_1MRnSaJimWtfKJ-MD46jUX9TwgW5Jqe0t4pH41RYWGAYuFnlyth2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQN4v-N9zFYwEqzGPBBX37q24QPFvAVUtokIo1fblIsmTIN0Y3CCE4uDdWRwgg-j", }, WhitelistedOperatorKeys: []string{}, - PermissionlessActivationEpoch: 13500, // Nov-27-2023 12:00:00 PM UTC + PermissionlessActivationEpoch: 13950, // Nov-29-2023 12:00:00 PM UTC } diff --git a/networkconfig/jato-v2.go b/networkconfig/jato-v2.go index 051826a858..f329699c54 100644 --- a/networkconfig/jato-v2.go +++ b/networkconfig/jato-v2.go @@ -33,5 +33,5 @@ var JatoV2 = NetworkConfig{ "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNmkwelNHRzFiaHlPZU8xVDVxc2UKOFpHbElBQ2pmemVYQzhpYVVReGVCb0dlVGRvN0tqalkwNy80b3hBNkhjdG45bEtxd1BodG5ISXIvZ1RlWXNYUwp5QVhPL1Q5K2RQcng1ZEp3SEVCdm5BcmNSQkNzaGF5Sng2S0xiZ3RJb2dGSWhkK1ptaFpiWFpWZVp5THhzK2tZCnM4djVwcHBIbWNwWHRwUVAxWm1ycndpTC9hZU5JNzczbUlrZ1pBOGdNK2Z5S2RtTGJrQXdXZWh1SXZKRmpuVCsKQlVkUHUzWGJIemU2SlJnY2NYNmZnM1gwOTJibG9VMzRxY1VIelNhWU9TZlc2TUpEbFgzQzJCeFhCZ042VFV0aQpDN2k2ZE9qaW14RzlSMkp4ZHVhZGpUeEM1MHl5OE9IVWpMVGNkc2pWRjdYNXdGUzFqaDI5aFpDY0FoeDB2NDg3CjdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNldITnNBdTdSYnMxM0I2c0taWXgKVnZuMldlTy9YMTdSeUx1MjA0K2VtbjkvSGhIRlhXT29CMGczekNZQWp2WWdsbFJka0laTWt3ZkFUNGZvVjVTKwpvNzFFQ1dFN1ZuaytxcWd0U3k5M0ZTTVJzUG9vNngrTUd4ZURBQ3RQbDdQV1EyTXJmV1hkNzVwV1p5TVd5VndHCktPbFo0RHhoQ0VOcXlRcndlOTkybU9wVDZBcTJ1TmVsUmdESUJDSW1CV01NcUl2aXdhSU96MlBmTWR1L3ZVTWgKcVFuNGJJZjFpcVk2WGlKU1g2bDJvUWlTb09VMjRvNkFCdHlHbzRpTDJXN2tOajVUa1hOOEVzeGc3WmUveVQ0YgpKNGtvVjdmNUE3dmpMbHc1ZkdjWDR1bTBNK1QwbnczUlVIY3pHK1E3U1VGMTFGU3c0VnM1WVBHWC84a2tzdXgyCkx3SURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, - PermissionlessActivationEpoch: 220257, // Nov-27-2023 12:04:48 PM UTC + PermissionlessActivationEpoch: 220707, // Nov-29-2023 12:04:48 PM UTC } diff --git a/networkconfig/mainnet.go 
b/networkconfig/mainnet.go index 3b22699a64..9da6a060a5 100644 --- a/networkconfig/mainnet.go +++ b/networkconfig/mainnet.go @@ -1,6 +1,7 @@ package networkconfig import ( + "math" "math/big" spectypes "github.com/bloxapp/ssv-spec/types" @@ -40,5 +41,5 @@ var Mainnet = NetworkConfig{ "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcU5Sd0xWSHNWMEloUjdjdUJkb1AKZnVwNTkydEJFSG0vRllDREZQbERMR2NVZ2NzZ29PdHBsV2hMRjBGSzIwQ3ppVi83WVZzcWpxcDh3VDExM3pBbQoxOTZZRlN6WmUzTFhOQXFRWlBwbDlpOVJxdVJJMGlBT2xiWUp0ampJRjd2ZVZLbVdybzMwWTZDV3JPcHpVQ1BPClRGVEpGZ0hvZmtQT2pabmprNURtdDg2ZURveUxzenJQZWQ0LzlyR2NNVUp4WnJBSjEvbFR1ajNaWWVJUk0wS04KUVQ0eitPb3p0T0dBeDVVcUk2THpQL3NGOWRJM3BzM3BIb3dXOWF2RHp3Qm94Y3hWam14NWhRMXowOTN4MnlkYgpWcjgxNDgzTzdqUkt6eFpXeEduOFJzZUROZkxwSi93VFJiQ0lVOFhwUC9IKzd6TWNGMG1HbVlUcjAvcWR1bVNsCjNRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdmRWVVJ0OFgxbFA5VDVSUUdYdVkKcFpZWjVBb3VuSEdUakMvQ1FoTmQ5RC9kT2kvSDUwVW1PdVBpTzhYYUF4UFRGcGIrZ2xCeGJRRHVQUGN1cENPdQpKN09lVTBvdzdsQjVMclZlWWt3RExnSHY3bDQwcjRWVTM3NlFueGhuS0JyVHNkaWdmZHJYUWZveGRhajVQQ0VYCnFjK1ozNXFPUmpCZ3dublRlbEJjc2NLMHorSkJaQzU0OXFOWThMbm9aMTBuRFptdW1YVDlac3dISCtJVkZacDYKMEZTY0k0V1V5U1gxVnJJT2tSandoSWlCSFk3YkhrZ01Bci9xeStuRmlFUUVRV2Q2VXAwOWtkS0hNVmdtVFp4KwprQXZRbFZ0Z3luYkFPWkNMeng0Ymo1Yi9MQklIejNiTk9zWlNtR3AxWi9hWDFkd1BaMlhOai83elovNGpuM095CkdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, - PermissionlessActivationEpoch: 248625, // Dec-11-2023 12:00:23 PM UTC + PermissionlessActivationEpoch: math.MaxUint64, // next version } From 0e857df486cea911513c6b36974074d602c9f6fd Mon Sep 17 00:00:00 2001 From: y0sher Date: Wed, 22 Nov 2023 15:05:47 +0200 Subject: [PATCH 49/54] enable prater in the gitlab ci --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 627b5f0e8c..f12adc8970 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -150,7 +150,7 @@ Deploy nodes to prod: # +---------------------------+ # | 🟠 Deploy SSV Prater nodes | # +---------------------------+ - #- .k8/production/prater/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 + - .k8/production/prater/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # +----------------------------+ # | 🔴 Deploy SSV Mainnet nodes | @@ -196,7 +196,7 @@ Deploy exporter to prod: # +------------------------------+ # | 🟠 Deploy Prater exporter | # +------------------------------+ - # - .k8/production/prater/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT + - .k8/production/prater/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT # # +------------------------------+ # │ 🔴 Deploy Mainnet exporter | From b789b240149538950697ccff0e634785b3508468 
Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 22 Nov 2023 16:53:43 +0200 Subject: [PATCH 50/54] fix: check signatures in `validatePartialSigMsgForSlot` (#1215) * fix: check signatures in `validatePartialSigMsgForSlot` * deploy to stage * revert --- protocol/v2/ssv/runner/runner.go | 3 +-- protocol/v2/ssv/runner/runner_signatures.go | 14 ++++++-------- protocol/v2/ssv/spectest/msg_processing_type.go | 2 -- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index ff833d8c31..9034d5da84 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -54,8 +54,7 @@ type BaseRunner struct { BeaconRoleType spectypes.BeaconRole // implementation vars - TimeoutF TimeoutF `json:"-"` - VerifySignatures bool `json:"-"` + TimeoutF TimeoutF `json:"-"` // highestDecidedSlot holds the highest decided duty slot and gets updated after each decided is reached highestDecidedSlot spec.Slot diff --git a/protocol/v2/ssv/runner/runner_signatures.go b/protocol/v2/ssv/runner/runner_signatures.go index edfc608ea7..54e4d9de1e 100644 --- a/protocol/v2/ssv/runner/runner_signatures.go +++ b/protocol/v2/ssv/runner/runner_signatures.go @@ -58,15 +58,13 @@ func (b *BaseRunner) validatePartialSigMsgForSlot( return errors.New("invalid partial sig slot") } - if b.VerifySignatures { - if err := types.VerifyByOperators(signedMsg.GetSignature(), signedMsg, b.Share.DomainType, spectypes.PartialSignatureType, b.Share.Committee); err != nil { - return errors.Wrap(err, "failed to verify PartialSignature") - } + if err := types.VerifyByOperators(signedMsg.GetSignature(), signedMsg, b.Share.DomainType, spectypes.PartialSignatureType, b.Share.Committee); err != nil { + return errors.Wrap(err, "failed to verify PartialSignature") + } - for _, msg := range signedMsg.Message.Messages { - if err := b.verifyBeaconPartialSignature(msg); err != nil { - return errors.Wrap(err, "could not verify Beacon partial Signature") - } + for _, msg := range signedMsg.Message.Messages { + if err := b.verifyBeaconPartialSignature(msg); err != nil { + return errors.Wrap(err, "could not verify Beacon partial Signature") } } diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go b/protocol/v2/ssv/spectest/msg_processing_type.go index 412b92b8da..b962418894 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -48,8 +48,6 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { } func (test *MsgProcessingSpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { - test.Runner.GetBaseRunner().VerifySignatures = true - v := ssvtesting.BaseValidator(logger, spectestingutils.KeySetForShare(test.Runner.GetBaseRunner().Share)) v.DutyRunners[test.Runner.GetBaseRunner().BeaconRoleType] = test.Runner v.Network = test.Runner.GetNetwork().(specqbft.Network) // TODO need to align From 20129b16bc3f755b6d67b6f1bab080052dea0157 Mon Sep 17 00:00:00 2001 From: Nikita Kryuchkov Date: Wed, 22 Nov 2023 21:41:26 +0400 Subject: [PATCH 51/54] Simplify duplicated proposal with different data check (#1207) * More logs for "duplicated proposal with different data" * Deploy to 41-44 * Deploy to 1-12 & 17-40 & 41-44 & 53-69 * Improve logging * Fix logging * Extend logs * Fix logging issues * Log slot and round * Log proposal data setting * Fix consensusData init * No deploy to 1-4 * Add validator and signer to logging * Simplify condition * 
Attempt to fix proposal data assignment * Revert deployment * Revert logs * Revert "Revert logs" This reverts commit 6871b56f4b0dc2d1405471866787a6043e52367d. * Revert "Revert deployment" This reverts commit 1bed180e576d32d6fa5b9dfa79fe05ac868405cf. * Remove heavy logs * Add root log * Fix root log * deploy to most of stage * deploy to more clusters. * change db for ssv-node-44 * print root as hex * deploy to all to prevent confusions * hack to update proposal before message is arriving. * deploy to explorer as well * remove the hack * add more verbose logs * add flag to not broadcast proposal on second quorum of RC * 1-48 deploy * Fix logging * go fmt * Deploy to 5-12 & 17-48 * Extend logs * Attempt to fix empty proposal data * Release 45-48 * Fix logs * Deploy to all nodes * Remove protocol changes * Disable deployment * Remove debugging leftovers * revert database reset * Revert "Remove debugging leftovers" This reverts commit 8d789d5b83be89fd0cf3a72378ef896e25d954b7. * Revert "Disable deployment" This reverts commit 673c162eb190f1f8f29cef58449d859139f56a41. * Revert the possible fix * track concurrency * validation locks by MessageID * Revert "validation locks by MessageID" This reverts commit a6d729f29279c6f77c8055b50489182709f24caa. * Revert "track concurrency" This reverts commit 628548953ff8e2e3bff986dca449e49d801a1d7f. * Revert "Revert the possible fix" This reverts commit 6513988b429a88134748ea7927bb483c832542f8. * Remove debugging leftovers * Disable deployment * Remove redundant code * Narrow proposal data check --------- Co-authored-by: MatheusFranco99 <48058141+MatheusFranco99@users.noreply.github.com> Co-authored-by: y0sher Co-authored-by: moshe-blox --- message/validation/consensus_validation.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go index fde979826b..674f148ea6 100644 --- a/message/validation/consensus_validation.go +++ b/message/validation/consensus_validation.go @@ -240,7 +240,7 @@ func (mv *messageValidator) validateSignerBehaviorConsensus( return err } - if !(msgSlot > signerState.Slot || msgSlot == signerState.Slot && msgRound > signerState.Round) { + if msgSlot == signerState.Slot && msgRound == signerState.Round { if mv.hasFullData(signedMsg) && signerState.ProposalData != nil && !bytes.Equal(signerState.ProposalData, signedMsg.FullData) { return ErrDuplicatedProposalWithDifferentData } From 6dafce329ea5772f517238ba3ab3b45163dc4b21 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 22 Nov 2023 19:46:05 +0200 Subject: [PATCH 52/54] fix: race conditions in message validation (#1217) * fix: race conditions in message validation * refactor * fix nil map --- message/validation/validation.go | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/message/validation/validation.go b/message/validation/validation.go index 2a363efe25..c00b2b8ea3 100644 --- a/message/validation/validation.go +++ b/message/validation/validation.go @@ -77,8 +77,14 @@ type messageValidator struct { dutyStore *dutystore.Store ownOperatorID spectypes.OperatorID operatorIDToPubkeyCache *hashmap.Map[spectypes.OperatorID, *rsa.PublicKey] - selfPID peer.ID - selfAccept bool + + // validationLocks is a map of lock per SSV message ID to + // prevent concurrent access to the same state. 
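+	// validationMutex guards the map itself; per-ID entries are created
+	// lazily and are not evicted.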
+ validationLocks map[spectypes.MessageID]*sync.Mutex + validationMutex sync.Mutex + + selfPID peer.ID + selfAccept bool } // NewMessageValidator returns a new MessageValidator with the given network configuration and options. @@ -88,6 +94,7 @@ func NewMessageValidator(netCfg networkconfig.NetworkConfig, opts ...Option) Mes metrics: &nopMetrics{}, netCfg: netCfg, operatorIDToPubkeyCache: hashmap.New[spectypes.OperatorID, *rsa.PublicKey](), + validationLocks: make(map[spectypes.MessageID]*sync.Mutex), } for _, opt := range opts { @@ -431,6 +438,17 @@ func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, return nil, descriptor, e } + // Lock this SSV message ID to prevent concurrent access to the same state. + mv.validationMutex.Lock() + mutex, ok := mv.validationLocks[msg.GetID()] + if !ok { + mutex = &sync.Mutex{} + mv.validationLocks[msg.GetID()] = mutex + } + mutex.Lock() + defer mutex.Unlock() + mv.validationMutex.Unlock() + descriptor.SSVMessageType = ssvMessage.MsgType if mv.nodeStorage != nil { From a4270b5f06e6e0e84507bb390de803a0f5eea02f Mon Sep 17 00:00:00 2001 From: moshe-blox Date: Tue, 5 Dec 2023 15:42:58 +0200 Subject: [PATCH 53/54] Permission Fork Schedule for Mainnet --- networkconfig/mainnet.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/networkconfig/mainnet.go b/networkconfig/mainnet.go index 9da6a060a5..4f977f917d 100644 --- a/networkconfig/mainnet.go +++ b/networkconfig/mainnet.go @@ -1,7 +1,6 @@ package networkconfig import ( - "math" "math/big" spectypes "github.com/bloxapp/ssv-spec/types" @@ -41,5 +40,5 @@ var Mainnet = NetworkConfig{ "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcU5Sd0xWSHNWMEloUjdjdUJkb1AKZnVwNTkydEJFSG0vRllDREZQbERMR2NVZ2NzZ29PdHBsV2hMRjBGSzIwQ3ppVi83WVZzcWpxcDh3VDExM3pBbQoxOTZZRlN6WmUzTFhOQXFRWlBwbDlpOVJxdVJJMGlBT2xiWUp0ampJRjd2ZVZLbVdybzMwWTZDV3JPcHpVQ1BPClRGVEpGZ0hvZmtQT2pabmprNURtdDg2ZURveUxzenJQZWQ0LzlyR2NNVUp4WnJBSjEvbFR1ajNaWWVJUk0wS04KUVQ0eitPb3p0T0dBeDVVcUk2THpQL3NGOWRJM3BzM3BIb3dXOWF2RHp3Qm94Y3hWam14NWhRMXowOTN4MnlkYgpWcjgxNDgzTzdqUkt6eFpXeEduOFJzZUROZkxwSi93VFJiQ0lVOFhwUC9IKzd6TWNGMG1HbVlUcjAvcWR1bVNsCjNRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdmRWVVJ0OFgxbFA5VDVSUUdYdVkKcFpZWjVBb3VuSEdUakMvQ1FoTmQ5RC9kT2kvSDUwVW1PdVBpTzhYYUF4UFRGcGIrZ2xCeGJRRHVQUGN1cENPdQpKN09lVTBvdzdsQjVMclZlWWt3RExnSHY3bDQwcjRWVTM3NlFueGhuS0JyVHNkaWdmZHJYUWZveGRhajVQQ0VYCnFjK1ozNXFPUmpCZ3dublRlbEJjc2NLMHorSkJaQzU0OXFOWThMbm9aMTBuRFptdW1YVDlac3dISCtJVkZacDYKMEZTY0k0V1V5U1gxVnJJT2tSandoSWlCSFk3YkhrZ01Bci9xeStuRmlFUUVRV2Q2VXAwOWtkS0hNVmdtVFp4KwprQXZRbFZ0Z3luYkFPWkNMeng0Ymo1Yi9MQklIejNiTk9zWlNtR3AxWi9hWDFkd1BaMlhOai83elovNGpuM095CkdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, - PermissionlessActivationEpoch: math.MaxUint64, // next version + PermissionlessActivationEpoch: 249056, // Dec-13-2023 09:58:47 AM UTC } From 5e7e0d726dfb2f91bb4173374c56546baa7fbcf2 Mon Sep 17 00:00:00 2001 From: moshe-blox <89339422+moshe-blox@users.noreply.github.com> Date: Wed, 6 Dec 2023 15:00:43 +0200 Subject: [PATCH 54/54] fix: stale operator ID in `p2pNetwork` (#1229) (#1230) * fix: stale operator ID in `p2pNetwork` --------- Co-authored-by: Lior Rutenberg --- cli/operator/node.go | 7 +++++-- network/p2p/config.go | 2 +- network/p2p/p2p.go | 2 +- network/p2p/p2p_pubsub.go | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/cli/operator/node.go 
b/cli/operator/node.go
index 5fdb49e192..302e7d68fe 100644
--- a/cli/operator/node.go
+++ b/cli/operator/node.go
@@ -165,10 +165,15 @@ var StartNodeCmd = &cobra.Command{
 			logger.Fatal("could not connect to execution client", zap.Error(err))
 		}
 
+		// Declared before it is assigned so that the OperatorID closure below
+		// captures the variable and always reads the live controller.
+		var validatorCtrl validator.Controller
 		cfg.P2pNetworkConfig.Permissioned = permissioned
 		cfg.P2pNetworkConfig.NodeStorage = nodeStorage
 		cfg.P2pNetworkConfig.OperatorPubKeyHash = format.OperatorID(operatorData.PublicKey)
-		cfg.P2pNetworkConfig.OperatorID = operatorData.ID
+		cfg.P2pNetworkConfig.OperatorID = func() spectypes.OperatorID {
+			return validatorCtrl.GetOperatorData().ID
+		}
 		cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode
 		cfg.P2pNetworkConfig.Network = networkConfig
 
@@ -238,7 +241,7 @@ var StartNodeCmd = &cobra.Command{
 		cfg.SSVOptions.ValidatorOptions.Metrics = metricsReporter
 		cfg.SSVOptions.Metrics = metricsReporter
 
-		validatorCtrl := validator.NewController(logger, cfg.SSVOptions.ValidatorOptions)
+		validatorCtrl = validator.NewController(logger, cfg.SSVOptions.ValidatorOptions)
 		cfg.SSVOptions.ValidatorController = validatorCtrl
 		operatorNode = operator.New(logger, cfg.SSVOptions, slotTickerProvider)
 
diff --git a/network/p2p/config.go b/network/p2p/config.go
index 9dd29ca7fc..3880b52311 100644
--- a/network/p2p/config.go
+++ b/network/p2p/config.go
@@ -63,7 +63,7 @@ type Config struct {
 	// OperatorPubKeyHash is hash of operator public key, used for identity, optional
 	OperatorPubKeyHash string
-	// OperatorID contains numeric operator ID
-	OperatorID spectypes.OperatorID
+	// OperatorID returns the node's numeric operator ID
+	OperatorID func() spectypes.OperatorID
 	// Router propagate incoming network messages to the responsive components
 	Router network.MessageRouter
 	// UserAgent to use by libp2p identify protocol
diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go
index 96c8d0771e..9ee5c04126 100644
--- a/network/p2p/p2p.go
+++ b/network/p2p/p2p.go
@@ -76,7 +76,7 @@ type p2pNetwork struct {
 	nodeStorage             operatorstorage.Storage
 	operatorPKHashToPKCache *hashmap.Map[string, []byte] // used for metrics
 	operatorPrivateKey      *rsa.PrivateKey
-	operatorID              spectypes.OperatorID
+	operatorID              func() spectypes.OperatorID
 }
 
 // New creates a new p2p network
diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go
index dc28fdffab..1ec65082a6 100644
--- a/network/p2p/p2p_pubsub.go
+++ b/network/p2p/p2p_pubsub.go
@@ -70,7 +70,7 @@ func (n *p2pNetwork) Broadcast(msg *spectypes.SSVMessage) error {
 			return err
 		}
 
-		encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, n.operatorID, signature)
+		encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, n.operatorID(), signature)
 	}
 
 	vpk := msg.GetID().GetPubKey()