diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index afcc42e934..f12adc8970 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -42,52 +42,66 @@ Build stage Docker image: only: - stage -Deploy nodes to stage: +# +---------------------+ +# | STAGE HETZNER NODES | +# +---------------------+ + + +Deploy nodes to hetzner stage: stage: deploy tags: - - blox-infra-stage + - hetzner-k8s-stage + image: bitnami/kubectl:1.27.5 script: - - apk add bash - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION - - export SSV_NODES_CPU_LIMIT=$STAGE_SSV_NODES_CPU_LIMIT - - export SSV_NODES_MEM_LIMIT=$STAGE_SSV_NODES_MEM_LIMIT - - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl - - chmod 755 kubectl - - mv kubectl /usr/bin/ + - export SSV_NODES_CPU_LIMIT=$HETZNER_STAGE_SSV_NODES_CPU_LIMIT + - export SSV_NODES_MEM_LIMIT=$HETZNER_STAGE_SSV_NODES_MEM_LIMIT + - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig + - mv kubeconfig ~/.kube/ + - export KUBECONFIG=~/.kube/kubeconfig + - kubectl config get-contexts # # +--------------------+ # | Deploy SSV nodes | # +--------------------+ - - .k8/stage/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT - - .k8/stage/scripts/deploy-cluster-5--8.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT - # - .k8/stage/scripts/deploy-cluster-9--12.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT - # - .k8/stage/scripts/deploy-cluster-13--16.sh $DOCKER_REPO_INFRA_STAGE 
$CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT - # - # +-------------------+ - # │ Deploy Bootnode | - # +-------------------+ - # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ - # - .k8/stage/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE 1000m 1000m + - .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh 
$DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-25--28.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv 
$APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT + - .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT only: - stage -Deploy exporter to stage: +Deploy exporter to hetzner stage: stage: deploy tags: - - blox-infra-stage + - hetzner-k8s-stage + image: bitnami/kubectl:1.27.5 script: - - apk add bash - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION - export SSV_EXPORTER_CPU_LIMIT=$STAGE_SSV_EXPORTER_CPU_LIMIT - export SSV_EXPORTER_MEM_LIMIT=$STAGE_SSV_EXPORTER_MEM_LIMIT - - curl -LO 
https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl - - chmod 755 kubectl - - mv kubectl /usr/bin/ - - .k8/stage/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE blox-infra-stage kubernetes-admin@blox-infra stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT + - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig + - mv kubeconfig ~/.kube/ + - export KUBECONFIG=~/.kube/kubeconfig + - kubectl config get-contexts + - .k8/hetzner-stage/scripts/deploy-holesky-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT only: - stage - # +---------------+ # | Prod | # +---------------+ @@ -122,6 +136,16 @@ Deploy nodes to prod: - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl - chmod +x ./kubectl - mv ./kubectl /usr/bin/kubectl + # +-------------------------------+ + # | 🟠 Deploy SSV Holesky nodes | + # +-------------------------------+ + - .k8/production/holesky/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 + # + # +-------------------------------+ + # │ 🟠 Deploy Holesky Bootnode | + # +-------------------------------+ + # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ + #- .k8/production/holesky/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # +---------------------------+ # | 🟠 Deploy SSV Prater nodes | @@ -134,15 +158,15 @@ Deploy nodes to 
prod: # - .k8/production/mainnet/scripts/deploy-cluster-1-4.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # # - # +--------------------------+ + # +-----------------------------+ # │ 🟠 Deploy Prater Bootnode | - # +--------------------------+ + # +-----------------------------+ # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ # - .k8/production/prater/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 # - # +---------------------------+ + # +------------------------------+ # │ 🔴 Deploy Mainnet Bootnode | - # +---------------------------+ + # +------------------------------+ # █▓▒░ Keep commented unless you're testing the bootnode ░▒▓█ # - .k8/production/mainnet/scripts/deploy-boot-nodes.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $PROD_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT_V3 $SSV_NODES_MEM_LIMIT_V3 @@ -164,16 +188,20 @@ Deploy exporter to prod: - chmod +x ./kubectl - mv ./kubectl /usr/bin/kubectl # - # +---------------------------+ + # +-------------------------------+ + # | 🟠 Deploy Holesky exporter | + # +-------------------------------+ + - .k8/production/holesky/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT + # + # +------------------------------+ # | 🟠 Deploy Prater exporter | - # +---------------------------+ + # +------------------------------+ - .k8/production/prater/scripts/deploy-exporters.sh 
$DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT # - # +---------------------------+ + # +------------------------------+ # │ 🔴 Deploy Mainnet exporter | - # +---------------------------+ + # +------------------------------+ # - .k8/production/mainnet/scripts/deploy-exporters.sh $DOCKER_REPO_INFRA_PROD $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_PROD blox-infra-prod kubernetes-admin@blox-infra-prod ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT only: - main - diff --git a/.k8/stage/scripts/deploy-cluster-1--4.sh b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh similarity index 94% rename from .k8/stage/scripts/deploy-cluster-1--4.sh rename to .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh index 5516cb4e39..f2a8669b7d 100755 --- a/.k8/stage/scripts/deploy-cluster-1--4.sh +++ b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh @@ -103,12 +103,12 @@ fi #done #fi -DIR=".k8/stage" +DIR=".k8/hetzner-stage" DEPLOY_FILES=( - "ssv-node-v2-1-deployment.yml" - "ssv-node-v2-2-deployment.yml" - "ssv-node-v2-3-deployment.yml" - "ssv-node-v2-4-deployment.yml" + "ssv-node-1-deployment.yml" + "ssv-node-2-deployment.yml" + "ssv-node-3-deployment.yml" + "ssv-node-4-deployment.yml" ) if [[ -d $DIR ]]; then diff --git a/.k8/stage/scripts/deploy-cluster-13--16.sh b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh similarity index 94% rename from .k8/stage/scripts/deploy-cluster-13--16.sh rename to .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh index 9b3772bdfe..1de999f0e8 100755 --- a/.k8/stage/scripts/deploy-cluster-13--16.sh +++ b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh @@ -103,12 +103,12 @@ fi #done #fi -DIR=".k8/stage" +DIR=".k8/hetzner-stage" DEPLOY_FILES=( - "ssv-node-v3-1-deployment.yml" - "ssv-node-v3-2-deployment.yml" - "ssv-node-v3-3-deployment.yml" - "ssv-node-v3-4-deployment.yml" + 
"ssv-node-13-deployment.yml" + "ssv-node-14-deployment.yml" + "ssv-node-15-deployment.yml" + "ssv-node-16-deployment.yml" ) if [[ -d $DIR ]]; then diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh new file mode 100755 index 0000000000..812a48e3f6 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-17-deployment.yml" + "ssv-node-18-deployment.yml" + "ssv-node-19-deployment.yml" + "ssv-node-20-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh new file mode 100755 index 0000000000..57c89f2fdd --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: 
blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-21-deployment.yml" + "ssv-node-22-deployment.yml" + "ssv-node-23-deployment.yml" + "ssv-node-24-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e 
"s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh new file mode 100755 index 0000000000..134e83dad8 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-25-deployment.yml" + "ssv-node-26-deployment.yml" + "ssv-node-27-deployment.yml" + "ssv-node-28-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh new file mode 100755 index 0000000000..6e721e8342 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: 
blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-29-deployment.yml" + "ssv-node-30-deployment.yml" + "ssv-node-31-deployment.yml" + "ssv-node-32-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e 
"s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh new file mode 100755 index 0000000000..deb2d911e5 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-33-deployment.yml" + "ssv-node-34-deployment.yml" + "ssv-node-35-deployment.yml" + "ssv-node-36-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh new file mode 100755 index 0000000000..c82c77ce42 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: 
blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-37-deployment.yml" + "ssv-node-38-deployment.yml" + "ssv-node-39-deployment.yml" + "ssv-node-40-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e 
"s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh new file mode 100755 index 0000000000..c4684e685e --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-41-deployment.yml" + "ssv-node-42-deployment.yml" + "ssv-node-43-deployment.yml" + "ssv-node-44-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh new file mode 100755 index 0000000000..11a54c9722 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: 
blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-45-deployment.yml" + "ssv-node-46-deployment.yml" + "ssv-node-47-deployment.yml" + "ssv-node-48-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e 
"s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh new file mode 100755 index 0000000000..dcc90d2742 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-49-deployment.yml" + "ssv-node-50-deployment.yml" + "ssv-node-51-deployment.yml" + "ssv-node-52-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/stage/scripts/deploy-cluster-5--8.sh b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh similarity index 91% rename from .k8/stage/scripts/deploy-cluster-5--8.sh rename to .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh index e3bbadd102..e3bb9e94a2 100755 --- a/.k8/stage/scripts/deploy-cluster-5--8.sh +++ b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh @@ -103,12 +103,12 @@ fi #done #fi -DIR=".k8/stage" +DIR=".k8/hetzner-stage" DEPLOY_FILES=( - "ssv-node-v2-5-deployment.yml" - "ssv-node-v2-6-deployment.yml" - "ssv-node-v2-7-deployment.yml" - "ssv-node-v2-8-deployment.yml" + "ssv-node-5-deployment.yml" + 
"ssv-node-6-deployment.yml" + "ssv-node-7-deployment.yml" + "ssv-node-8-deployment.yml" ) if [[ -d $DIR ]]; then @@ -121,7 +121,7 @@ if [[ -d $DIR ]]; then -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 done fi diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh new file mode 100755 index 0000000000..9efd728b17 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo 
$NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-53-deployment.yml" + "ssv-node-54-deployment.yml" + "ssv-node-55-deployment.yml" + "ssv-node-56-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh new file mode 100755 index 0000000000..1be68e57f5 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" 
+ exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-57-deployment.yml" + "ssv-node-58-deployment.yml" + "ssv-node-59-deployment.yml" + "ssv-node-60-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh new file mode 100755 index 0000000000..2fc32263a0 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: 
blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-61-deployment.yml" + "ssv-node-62-deployment.yml" + "ssv-node-63-deployment.yml" + "ssv-node-64-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e 
"s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh b/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh new file mode 100755 index 0000000000..fe57c84c75 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-65-deployment.yml" + "ssv-node-66-deployment.yml" + "ssv-node-67-deployment.yml" + "ssv-node-68-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh new file mode 100755 index 0000000000..229536c0d4 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: 
blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-69-deployment.yml" + "ssv-node-70-deployment.yml" + "ssv-node-71-deployment.yml" + "ssv-node-72-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e 
"s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/stage/scripts/deploy-cluster-9--12.sh b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh similarity index 99% rename from .k8/stage/scripts/deploy-cluster-9--12.sh rename to .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh index 057b7205af..81fe2de698 100755 --- a/.k8/stage/scripts/deploy-cluster-9--12.sh +++ b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh @@ -103,7 +103,7 @@ fi #done #fi -DIR=".k8/stage" +DIR=".k8/hetzner-stage" DEPLOY_FILES=( "ssv-node-9-deployment.yml" "ssv-node-10-deployment.yml" diff --git a/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh b/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh new file mode 100755 index 0000000000..9a899ef3d3 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z ${9} ]]; then + echo "Please provide exporter cpu limit" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide exporter cpu limit" + exit 1 +fi + +DOCKERREPO=$1 
+IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +EXPORTER_CPU_LIMIT=$9 +EXPORTER_MEM_LIMIT=${10} + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $EXPORTER_CPU_LIMIT +echo $EXPORTER_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-exporter-holesky.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ + -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/stage/ssv-exporter-1.yml b/.k8/hetzner-stage/ssv-exporter-holesky.yml similarity index 54% rename from .k8/stage/ssv-exporter-1.yml rename to .k8/hetzner-stage/ssv-exporter-holesky.yml index 8433e50901..6f71edfb51 100644 --- a/.k8/stage/ssv-exporter-1.yml +++ b/.k8/hetzner-stage/ssv-exporter-holesky.yml @@ -1,76 +1,43 @@ --- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: ssv-exporter - namespace: REPLACE_NAMESPACE -spec: - hosts: - - "ws-exporter.REPLACE_DOMAIN_SUFFIX" - gateways: - - ssv-exporter - http: - - route: - - destination: - host: ssv-exporter - port: - number: 14000 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: ssv-exporter 
- namespace: REPLACE_NAMESPACE -spec: - selector: - istio: ingressgateway-int - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "ws-exporter.REPLACE_DOMAIN_SUFFIX" ---- apiVersion: v1 kind: Service metadata: - name: ssv-exporter + name: ssv-exporter-holesky namespace: REPLACE_NAMESPACE labels: - app: ssv-exporter + app: ssv-exporter-holesky spec: type: ClusterIP ports: - - port: 12000 + - port: 12073 protocol: UDP - targetPort: 12000 - name: port-12000 - - port: 13000 + targetPort: 12073 + name: port-12073 + - port: 13073 protocol: TCP - targetPort: 13000 - name: port-13000 - - port: 14000 + targetPort: 13073 + name: port-13073 + - port: 14073 protocol: TCP - targetPort: 14000 - name: port-14000 - - port: 15000 + targetPort: 14073 + name: port-14073 + - port: 15073 protocol: TCP - targetPort: 15000 - name: port-15000 - - port: 16000 + targetPort: 15073 + name: port-15073 + - port: 16073 protocol: TCP - targetPort: 16000 - name: port-16000 + targetPort: 16073 + name: port-16073 selector: - app: ssv-exporter + app: ssv-exporter-holesky --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-exporter - name: ssv-exporter + app: ssv-exporter-holesky + name: ssv-exporter-holesky namespace: REPLACE_NAMESPACE spec: replicas: REPLACE_REPLICAS @@ -78,11 +45,11 @@ spec: type: Recreate selector: matchLabels: - app: ssv-exporter + app: ssv-exporter-holesky template: metadata: labels: - app: ssv-exporter + app: ssv-exporter-holesky spec: affinity: nodeAffinity: @@ -92,9 +59,9 @@ spec: - key: kubernetes.io/role operator: In values: - - ssv-main + - ssv-exporter containers: - - name: ssv-exporter + - name: ssv-exporter-holesky image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG imagePullPolicy: Always resources: @@ -103,22 +70,22 @@ spec: memory: REPLACE_EXPORTER_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12000 - name: port-12000 - hostPort: 12000 + - containerPort: 12073 + name: port-12073 + hostPort: 12073 
protocol: UDP - - containerPort: 13000 - name: port-13000 - hostPort: 13000 - - containerPort: 14000 - name: port-14000 - hostPort: 14000 - - containerPort: 15000 - name: port-15000 - hostPort: 15000 - - containerPort: 16000 - name: port-16000 - hostPort: 16000 + - containerPort: 13073 + name: port-13073 + hostPort: 13073 + - containerPort: 14073 + name: port-14073 + hostPort: 14073 + - containerPort: 15073 + name: port-15073 + hostPort: 15073 + - containerPort: 16073 + name: port-16073 + hostPort: 16073 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -140,23 +107,23 @@ spec: - name: DISCOVERY_TYPE_KEY value: "discv5" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT - value: "15000" + value: "15073" - name: SSV_API_PORT - value: "16000" + value: "16073" - name: ENABLE_PROFILE value: "true" - name: UDP_PORT - value: "12000" + value: "12073" - name: TCP_PORT - value: "13000" + value: "13073" - name: WS_API_PORT - value: "14000" + value: "14073" - name: FULLNODE value: "true" - name: EXPORTER @@ -169,19 +136,17 @@ spec: value: "0xffffffffffffffffffffffffffffffff" volumeMounts: - mountPath: /data - name: ssv-exporter + name: ssv-exporter-holesky - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-exporter-cm + name: ssv-exporter-holesky-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-exporter + - name: ssv-exporter-holesky persistentVolumeClaim: - claimName: ssv-exporter - - name: ssv-exporter-cm + claimName: ssv-exporter-holesky + - name: ssv-exporter-holesky-cm configMap: - name: ssv-exporter-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-exporter-holesky-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-1-deployment.yml b/.k8/hetzner-stage/ssv-node-1-deployment.yml new file mode 100644 index 0000000000..086be0d4f4 
--- /dev/null +++ b/.k8/hetzner-stage/ssv-node-1-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-1-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-1 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12001 + protocol: UDP + targetPort: 12001 + name: port-12001 + - port: 13001 + protocol: TCP + targetPort: 13001 + name: port-13001 + - port: 15001 + protocol: TCP + targetPort: 15001 + name: metrics + - port: 16001 + protocol: TCP + targetPort: 16001 + name: port-16001 + selector: + app: ssv-node-1 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-1 + name: ssv-node-1 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-1 + template: + metadata: + labels: + app: ssv-node-1 + spec: + containers: + - name: ssv-node-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12001 + name: port-12001 + hostPort: 12001 + protocol: UDP + - containerPort: 13001 + name: port-13001 + hostPort: 13001 + - containerPort: 15001 + name: port-15001 + hostPort: 15001 + - containerPort: 16001 + name: port-16001 + hostPort: 16001 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15001" + - name: SSV_API_PORT + value: "16001" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-1 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-1-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-1 + persistentVolumeClaim: + claimName: ssv-node-1 + - name: ssv-node-1-cm + configMap: + name: ssv-node-1-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-10-deployment.yml b/.k8/hetzner-stage/ssv-node-10-deployment.yml new file mode 100644 index 0000000000..7f12a82051 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-10-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-10-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-10 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12010 + protocol: UDP + targetPort: 12010 + name: port-12010 + - port: 13010 + protocol: TCP + targetPort: 13010 + name: port-13010 + - port: 15010 + protocol: TCP + targetPort: 15010 + name: metrics + - port: 16010 + protocol: TCP + targetPort: 16010 + name: port-16010 + selector: + app: ssv-node-10 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-10 + name: ssv-node-10 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-10 + template: + metadata: + labels: + app: ssv-node-10 + spec: + containers: + - name: ssv-node-10 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12010 + name: port-12010 + protocol: UDP + hostPort: 12010 + - containerPort: 13010 + name: port-13010 + hostPort: 13010 + - containerPort: 15010 + name: port-15010 + hostPort: 15010 + - containerPort: 16010 + name: port-16010 + hostPort: 16010 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15010" + - name: SSV_API_PORT + value: "16010" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-10 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-10-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-10 + persistentVolumeClaim: + claimName: ssv-node-10 + - name: ssv-node-10-cm + configMap: + name: ssv-node-10-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-11-deployment.yml b/.k8/hetzner-stage/ssv-node-11-deployment.yml new file mode 100644 index 0000000000..83b4bd283d --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-11-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-11-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-11 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12011 + protocol: UDP + targetPort: 12011 + name: port-12011 + - port: 13011 + protocol: TCP + targetPort: 13011 + name: port-13011 + - port: 15011 + protocol: TCP + targetPort: 15011 + name: metrics + - port: 16011 + protocol: TCP + targetPort: 16011 + name: port-16011 + selector: + app: ssv-node-11 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-11 + name: ssv-node-11 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-11 + template: + metadata: + labels: + app: ssv-node-11 + spec: + containers: + - name: ssv-node-11 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12011 + name: port-12011 + protocol: UDP + hostPort: 12011 + - containerPort: 13011 + name: port-13011 + hostPort: 13011 + - containerPort: 15011 + name: port-15011 + hostPort: 15011 + - containerPort: 16011 + name: port-16011 + hostPort: 16011 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15011" + - name: SSV_API_PORT + value: "16011" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-11 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-11-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-11 + persistentVolumeClaim: + claimName: ssv-node-11 + - name: ssv-node-11-cm + configMap: + name: ssv-node-11-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-12-deployment.yml b/.k8/hetzner-stage/ssv-node-12-deployment.yml new file mode 100644 index 0000000000..bb5a5364b4 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-12-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-12-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-12 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12012 + protocol: UDP + targetPort: 12012 + name: port-12012 + - port: 13012 + protocol: TCP + targetPort: 13012 + name: port-13012 + - port: 15012 + protocol: TCP + targetPort: 15012 + name: metrics + - port: 16012 + protocol: TCP + targetPort: 16012 + name: port-16012 + selector: + app: ssv-node-12 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-12 + name: ssv-node-12 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-12 + template: + metadata: + labels: + app: ssv-node-12 + spec: + containers: + - name: ssv-node-12 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12012 + name: port-12012 + protocol: UDP + hostPort: 12012 + - containerPort: 13012 + name: port-13012 + hostPort: 13012 + - containerPort: 15012 + name: port-15012 + hostPort: 15012 + - containerPort: 16012 + name: port-16012 + hostPort: 16012 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15012" + - name: SSV_API_PORT + value: "16012" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-12 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-12-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-12 + persistentVolumeClaim: + claimName: ssv-node-12 + - name: ssv-node-12-cm + configMap: + name: ssv-node-12-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-13-deployment.yml b/.k8/hetzner-stage/ssv-node-13-deployment.yml new file mode 100644 index 0000000000..22b6d23514 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-13-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-13-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-13 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12013 + protocol: UDP + targetPort: 12013 + name: port-12013 + - port: 13013 + protocol: TCP + targetPort: 13013 + name: port-13013 + - port: 15013 + protocol: TCP + targetPort: 15013 + name: metrics + - port: 16013 + protocol: TCP + targetPort: 16013 + name: port-16013 + selector: + app: ssv-node-13 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-13 + name: ssv-node-13 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-13 + template: + metadata: + labels: + app: ssv-node-13 + spec: + containers: + - name: ssv-node-13 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12013 + name: port-12013 + protocol: UDP + hostPort: 12013 + - containerPort: 13013 + name: port-13013 + hostPort: 13013 + - containerPort: 15013 + name: port-15013 + hostPort: 15013 + - containerPort: 16013 + name: port-16013 + hostPort: 16013 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15013" + - name: SSV_API_PORT + value: "16013" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-13 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-13-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-13 + persistentVolumeClaim: + claimName: ssv-node-13 + - name: ssv-node-13-cm + configMap: + name: ssv-node-13-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-14-deployment.yml b/.k8/hetzner-stage/ssv-node-14-deployment.yml new file mode 100644 index 0000000000..62cd8d850c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-14-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-14-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-14 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12014 + protocol: UDP + targetPort: 12014 + name: port-12014 + - port: 13014 + protocol: TCP + targetPort: 13014 + name: port-13014 + - port: 15014 + protocol: TCP + targetPort: 15014 + name: metrics + - port: 16014 + protocol: TCP + targetPort: 16014 + name: port-16014 + selector: + app: ssv-node-14 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-14 + name: ssv-node-14 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-14 + template: + metadata: + labels: + app: ssv-node-14 + spec: + containers: + - name: ssv-node-14 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12014 + name: port-12014 + protocol: UDP + hostPort: 12014 + - containerPort: 13014 + name: port-13014 + hostPort: 13014 + - containerPort: 15014 + name: port-15014 + hostPort: 15014 + - containerPort: 16014 + name: port-16014 + hostPort: 16014 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15014" + - name: SSV_API_PORT + value: "16014" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-14 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-14-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-14 + persistentVolumeClaim: + claimName: ssv-node-14 + - name: ssv-node-14-cm + configMap: + name: ssv-node-14-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-15-deployment.yml b/.k8/hetzner-stage/ssv-node-15-deployment.yml new file mode 100644 index 0000000000..52393fc8b7 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-15-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-15-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-15 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12015 + protocol: UDP + targetPort: 12015 + name: port-12015 + - port: 13015 + protocol: TCP + targetPort: 13015 + name: port-13015 + - port: 15015 + protocol: TCP + targetPort: 15015 + name: metrics + - port: 16015 + protocol: TCP + targetPort: 16015 + name: port-16015 + selector: + app: ssv-node-15 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-15 + name: ssv-node-15 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-15 + template: + metadata: + labels: + app: ssv-node-15 + spec: + containers: + - name: ssv-node-15 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12015 + name: port-12015 + protocol: UDP + hostPort: 12015 + - containerPort: 13015 + name: port-13015 + hostPort: 13015 + - containerPort: 15015 + name: port-15015 + hostPort: 15015 + - containerPort: 16015 + name: port-16015 + hostPort: 16015 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15015" + - name: SSV_API_PORT + value: "16015" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-15 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-15-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-15 + persistentVolumeClaim: + claimName: ssv-node-15 + - name: ssv-node-15-cm + configMap: + name: ssv-node-15-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-16-deployment.yml b/.k8/hetzner-stage/ssv-node-16-deployment.yml new file mode 100644 index 0000000000..611ac23afa --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-16-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-16-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-16 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12016 + protocol: UDP + targetPort: 12016 + name: port-12016 + - port: 13016 + protocol: TCP + targetPort: 13016 + name: port-13016 + - port: 15016 + protocol: TCP + targetPort: 15016 + name: metrics + - port: 16016 + protocol: TCP + targetPort: 16016 + name: port-16016 + selector: + app: ssv-node-16 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-16 + name: ssv-node-16 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-16 + template: + metadata: + labels: + app: ssv-node-16 + spec: + containers: + - name: ssv-node-16 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12016 + name: port-12016 + protocol: UDP + hostPort: 12016 + - containerPort: 13016 + name: port-13016 + hostPort: 13016 + - containerPort: 15016 + name: port-15016 + hostPort: 15016 + - containerPort: 16016 + name: port-16016 + hostPort: 16016 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15016" + - name: SSV_API_PORT + value: "16016" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-16 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-16-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-16 + persistentVolumeClaim: + claimName: ssv-node-16 + - name: ssv-node-16-cm + configMap: + name: ssv-node-16-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-17-deployment.yml b/.k8/hetzner-stage/ssv-node-17-deployment.yml new file mode 100644 index 0000000000..4dfdcbe204 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-17-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-17-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-17 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12017 + protocol: UDP + targetPort: 12017 + name: port-12017 + - port: 13017 + protocol: TCP + targetPort: 13017 + name: port-13017 + - port: 15017 + protocol: TCP + targetPort: 15017 + name: metrics + - port: 16017 + protocol: TCP + targetPort: 16017 + name: port-16017 + selector: + app: ssv-node-17 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-17 + name: ssv-node-17 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-17 + template: + metadata: + labels: + app: ssv-node-17 + spec: + containers: + - name: ssv-node-17 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12017 + name: port-12017 + protocol: UDP + hostPort: 12017 + - containerPort: 13017 + name: port-13017 + hostPort: 13017 + - containerPort: 15017 + name: port-15017 + hostPort: 15017 + - containerPort: 16017 + name: port-16017 + hostPort: 16017 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15017" + - name: SSV_API_PORT + value: "16017" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-17 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-17-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-17 + persistentVolumeClaim: + claimName: ssv-node-17 + - name: ssv-node-17-cm + configMap: + name: ssv-node-17-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-18-deployment.yml b/.k8/hetzner-stage/ssv-node-18-deployment.yml new file mode 100644 index 0000000000..3a6cc86755 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-18-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-18-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-18 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12018 + protocol: UDP + targetPort: 12018 + name: port-12018 + - port: 13018 + protocol: TCP + targetPort: 13018 + name: port-13018 + - port: 15018 + protocol: TCP + targetPort: 15018 + name: metrics + - port: 16018 + protocol: TCP + targetPort: 16018 + name: port-16018 + selector: + app: ssv-node-18 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-18 + name: ssv-node-18 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-18 + template: + metadata: + labels: + app: ssv-node-18 + spec: + containers: + - name: ssv-node-18 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12018 + name: port-12018 + protocol: UDP + hostPort: 12018 + - containerPort: 13018 + name: port-13018 + hostPort: 13018 + - containerPort: 15018 + name: port-15018 + hostPort: 15018 + - containerPort: 16018 + name: port-16018 + hostPort: 16018 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15018" + - name: SSV_API_PORT + value: "16018" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-18 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-18-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-18 + persistentVolumeClaim: + claimName: ssv-node-18 + - name: ssv-node-18-cm + configMap: + name: ssv-node-18-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-19-deployment.yml b/.k8/hetzner-stage/ssv-node-19-deployment.yml new file mode 100644 index 0000000000..6afc020c66 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-19-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-19-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-19 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12019 + protocol: UDP + targetPort: 12019 + name: port-12019 + - port: 13019 + protocol: TCP + targetPort: 13019 + name: port-13019 + - port: 15019 + protocol: TCP + targetPort: 15019 + name: metrics + - port: 16019 + protocol: TCP + targetPort: 16019 + name: port-16019 + selector: + app: ssv-node-19 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-19 + name: ssv-node-19 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-19 + template: + metadata: + labels: + app: ssv-node-19 + spec: + containers: + - name: ssv-node-19 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12019 + name: port-12019 + protocol: UDP + hostPort: 12019 + - containerPort: 13019 + name: port-13019 + hostPort: 13019 + - containerPort: 15019 + name: port-15019 + hostPort: 15019 + - containerPort: 16019 + name: port-16019 + hostPort: 16019 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15019" + - name: SSV_API_PORT + value: "16019" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-19 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-19-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-19 + persistentVolumeClaim: + claimName: ssv-node-19 + - name: ssv-node-19-cm + configMap: + name: ssv-node-19-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-2-deployment.yml b/.k8/hetzner-stage/ssv-node-2-deployment.yml new file mode 100644 index 0000000000..3e7c411852 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-2-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-2-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-2 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12002 + protocol: UDP + targetPort: 12002 + name: port-12002 + - port: 13002 + protocol: TCP + targetPort: 13002 + name: port-13002 + - port: 15002 + protocol: TCP + targetPort: 15002 + name: metrics + - port: 16002 + protocol: TCP + targetPort: 16002 + name: port-16002 + selector: + app: ssv-node-2 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-2 + name: ssv-node-2 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-2 + template: + metadata: + labels: + app: ssv-node-2 + spec: + containers: + - name: ssv-node-2 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12002 + name: port-12002 + protocol: UDP + hostPort: 12002 + - containerPort: 13002 + name: port-13002 + hostPort: 13002 + - containerPort: 15002 + name: port-15002 + hostPort: 15002 + - containerPort: 16002 + name: port-16002 + hostPort: 16002 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15002" + - name: SSV_API_PORT + value: "16002" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-2 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-2-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-2 + persistentVolumeClaim: + claimName: ssv-node-2 + - name: ssv-node-2-cm + configMap: + name: ssv-node-2-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-20-deployment.yml b/.k8/hetzner-stage/ssv-node-20-deployment.yml new file mode 100644 index 0000000000..e4a1bbe9dc --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-20-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-20-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-20 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12020 + protocol: UDP + targetPort: 12020 + name: port-12020 + - port: 13020 + protocol: TCP + targetPort: 13020 + name: port-13020 + - port: 15020 + protocol: TCP + targetPort: 15020 + name: metrics + - port: 16020 + protocol: TCP + targetPort: 16020 + name: port-16020 + selector: + app: ssv-node-20 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-20 + name: ssv-node-20 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-20 + template: + metadata: + labels: + app: ssv-node-20 + spec: + containers: + - name: ssv-node-20 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12020 + name: port-12020 + protocol: UDP + hostPort: 12020 + - containerPort: 13020 + name: port-13020 + hostPort: 13020 + - containerPort: 15020 + name: port-15020 + hostPort: 15020 + - containerPort: 16020 + name: port-16020 + hostPort: 16020 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15020" + - name: SSV_API_PORT + value: "16020" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-20 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-20-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-20 + persistentVolumeClaim: + claimName: ssv-node-20 + - name: ssv-node-20-cm + configMap: + name: ssv-node-20-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-21-deployment.yml b/.k8/hetzner-stage/ssv-node-21-deployment.yml new file mode 100644 index 0000000000..e91e607a6b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-21-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-21-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-21 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12021 + protocol: UDP + targetPort: 12021 + name: port-12021 + - port: 13021 + protocol: TCP + targetPort: 13021 + name: port-13021 + - port: 15021 + protocol: TCP + targetPort: 15021 + name: metrics + - port: 16021 + protocol: TCP + targetPort: 16021 + name: port-16021 + selector: + app: ssv-node-21 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-21 + name: ssv-node-21 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-21 + template: + metadata: + labels: + app: ssv-node-21 + spec: + containers: + - name: ssv-node-21 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12021 + name: port-12021 + protocol: UDP + hostPort: 12021 + - containerPort: 13021 + name: port-13021 + hostPort: 13021 + - containerPort: 15021 + name: port-15021 + hostPort: 15021 + - containerPort: 16021 + name: port-16021 + hostPort: 16021 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15021" + - name: SSV_API_PORT + value: "16021" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-21 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-21-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-21 + persistentVolumeClaim: + claimName: ssv-node-21 + - name: ssv-node-21-cm + configMap: + name: ssv-node-21-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-22-deployment.yml b/.k8/hetzner-stage/ssv-node-22-deployment.yml new file mode 100644 index 0000000000..587cfcb02f --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-22-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-22-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-22 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12022 + protocol: UDP + targetPort: 12022 + name: port-12022 + - port: 13022 + protocol: TCP + targetPort: 13022 + name: port-13022 + - port: 15022 + protocol: TCP + targetPort: 15022 + name: metrics + - port: 16022 + protocol: TCP + targetPort: 16022 + name: port-16022 + selector: + app: ssv-node-22 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-22 + name: ssv-node-22 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-22 + template: + metadata: + labels: + app: ssv-node-22 + spec: + containers: + - name: ssv-node-22 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12022 + name: port-12022 + protocol: UDP + hostPort: 12022 + - containerPort: 13022 + name: port-13022 + hostPort: 13022 + - containerPort: 15022 + name: port-15022 + hostPort: 15022 + - containerPort: 16022 + name: port-16022 + hostPort: 16022 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15022" + - name: SSV_API_PORT + value: "16022" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-22 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-22-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-22 + persistentVolumeClaim: + claimName: ssv-node-22 + - name: ssv-node-22-cm + configMap: + name: ssv-node-22-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-23-deployment.yml b/.k8/hetzner-stage/ssv-node-23-deployment.yml new file mode 100644 index 0000000000..2b8bcf79fd --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-23-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-23-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-23 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12023 + protocol: UDP + targetPort: 12023 + name: port-12023 + - port: 13023 + protocol: TCP + targetPort: 13023 + name: port-13023 + - port: 15023 + protocol: TCP + targetPort: 15023 + name: metrics + - port: 16023 + protocol: TCP + targetPort: 16023 + name: port-16023 + selector: + app: ssv-node-23 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-23 + name: ssv-node-23 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-23 + template: + metadata: + labels: + app: ssv-node-23 + spec: + containers: + - name: ssv-node-23 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12023 + name: port-12023 + protocol: UDP + hostPort: 12023 + - containerPort: 13023 + name: port-13023 + hostPort: 13023 + - containerPort: 15023 + name: port-15023 + hostPort: 15023 + - containerPort: 16023 + name: port-16023 + hostPort: 16023 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15023" + - name: SSV_API_PORT + value: "16023" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-23 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-23-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-23 + persistentVolumeClaim: + claimName: ssv-node-23 + - name: ssv-node-23-cm + configMap: + name: ssv-node-23-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-24-deployment.yml b/.k8/hetzner-stage/ssv-node-24-deployment.yml new file mode 100644 index 0000000000..200c013aa5 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-24-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-24-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-24 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12024 + protocol: UDP + targetPort: 12024 + name: port-12024 + - port: 13024 + protocol: TCP + targetPort: 13024 + name: port-13024 + - port: 15024 + protocol: TCP + targetPort: 15024 + name: metrics + - port: 16024 + protocol: TCP + targetPort: 16024 + name: port-16024 + selector: + app: ssv-node-24 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-24 + name: ssv-node-24 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-24 + template: + metadata: + labels: + app: ssv-node-24 + spec: + containers: + - name: ssv-node-24 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12024 + name: port-12024 + protocol: UDP + hostPort: 12024 + - containerPort: 13024 + name: port-13024 + hostPort: 13024 + - containerPort: 15024 + name: port-15024 + hostPort: 15024 + - containerPort: 16024 + name: port-16024 + hostPort: 16024 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15024" + - name: SSV_API_PORT + value: "16024" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-24 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-24-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-24 + persistentVolumeClaim: + claimName: ssv-node-24 + - name: ssv-node-24-cm + configMap: + name: ssv-node-24-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-25-deployment.yml b/.k8/hetzner-stage/ssv-node-25-deployment.yml new file mode 100644 index 0000000000..32570b1800 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-25-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-25-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-25 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12025 + protocol: UDP + targetPort: 12025 + name: port-12025 + - port: 13025 + protocol: TCP + targetPort: 13025 + name: port-13025 + - port: 15025 + protocol: TCP + targetPort: 15025 + name: metrics + - port: 16025 + protocol: TCP + targetPort: 16025 + name: port-16025 + selector: + app: ssv-node-25 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-25 + name: ssv-node-25 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-25 + template: + metadata: + labels: + app: ssv-node-25 + spec: + containers: + - name: ssv-node-25 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12025 + name: port-12025 + protocol: UDP + hostPort: 12025 + - containerPort: 13025 + name: port-13025 + hostPort: 13025 + - containerPort: 15025 + name: port-15025 + hostPort: 15025 + - containerPort: 16025 + name: port-16025 + hostPort: 16025 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15025" + - name: SSV_API_PORT + value: "16025" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-25 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-25-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-25 + persistentVolumeClaim: + claimName: ssv-node-25 + - name: ssv-node-25-cm + configMap: + name: ssv-node-25-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-26-deployment.yml b/.k8/hetzner-stage/ssv-node-26-deployment.yml new file mode 100644 index 0000000000..e1931ba6b4 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-26-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-26-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-26 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12026 + protocol: UDP + targetPort: 12026 + name: port-12026 + - port: 13026 + protocol: TCP + targetPort: 13026 + name: port-13026 + - port: 15026 + protocol: TCP + targetPort: 15026 + name: metrics + - port: 16026 + protocol: TCP + targetPort: 16026 + name: port-16026 + selector: + app: ssv-node-26 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-26 + name: ssv-node-26 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-26 + template: + metadata: + labels: + app: ssv-node-26 + spec: + containers: + - name: ssv-node-26 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12026 + name: port-12026 + protocol: UDP + hostPort: 12026 + - containerPort: 13026 + name: port-13026 + hostPort: 13026 + - containerPort: 15026 + name: port-15026 + hostPort: 15026 + - containerPort: 16026 + name: port-16026 + hostPort: 16026 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15026" + - name: SSV_API_PORT + value: "16026" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-26 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-26-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-26 + persistentVolumeClaim: + claimName: ssv-node-26 + - name: ssv-node-26-cm + configMap: + name: ssv-node-26-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-27-deployment.yml b/.k8/hetzner-stage/ssv-node-27-deployment.yml new file mode 100644 index 0000000000..cbda5608a0 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-27-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-27-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-27 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12027 + protocol: UDP + targetPort: 12027 + name: port-12027 + - port: 13027 + protocol: TCP + targetPort: 13027 + name: port-13027 + - port: 15027 + protocol: TCP + targetPort: 15027 + name: metrics + - port: 16027 + protocol: TCP + targetPort: 16027 + name: port-16027 + selector: + app: ssv-node-27 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-27 + name: ssv-node-27 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-27 + template: + metadata: + labels: + app: ssv-node-27 + spec: + containers: + - name: ssv-node-27 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12027 + name: port-12027 + protocol: UDP + hostPort: 12027 + - containerPort: 13027 + name: port-13027 + hostPort: 13027 + - containerPort: 15027 + name: port-15027 + hostPort: 15027 + - containerPort: 16027 + name: port-16027 + hostPort: 16027 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15027" + - name: SSV_API_PORT + value: "16027" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-27 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-27-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-27 + persistentVolumeClaim: + claimName: ssv-node-27 + - name: ssv-node-27-cm + configMap: + name: ssv-node-27-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-28-deployment.yml b/.k8/hetzner-stage/ssv-node-28-deployment.yml new file mode 100644 index 0000000000..dd3365c183 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-28-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-28-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-28 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12028 + protocol: UDP + targetPort: 12028 + name: port-12028 + - port: 13028 + protocol: TCP + targetPort: 13028 + name: port-13028 + - port: 15028 + protocol: TCP + targetPort: 15028 + name: metrics + - port: 16028 + protocol: TCP + targetPort: 16028 + name: port-16028 + selector: + app: ssv-node-28 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-28 + name: ssv-node-28 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-28 + template: + metadata: + labels: + app: ssv-node-28 + spec: + containers: + - name: ssv-node-28 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12028 + name: port-12028 + protocol: UDP + hostPort: 12028 + - containerPort: 13028 + name: port-13028 + hostPort: 13028 + - containerPort: 15028 + name: port-15028 + hostPort: 15028 + - containerPort: 16028 + name: port-16028 + hostPort: 16028 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15028" + - name: SSV_API_PORT + value: "16028" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-28 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-28-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-28 + persistentVolumeClaim: + claimName: ssv-node-28 + - name: ssv-node-28-cm + configMap: + name: ssv-node-28-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-29-deployment.yml b/.k8/hetzner-stage/ssv-node-29-deployment.yml new file mode 100644 index 0000000000..759c621ae6 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-29-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-29-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-29 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12029 + protocol: UDP + targetPort: 12029 + name: port-12029 + - port: 13029 + protocol: TCP + targetPort: 13029 + name: port-13029 + - port: 15029 + protocol: TCP + targetPort: 15029 + name: metrics + - port: 16029 + protocol: TCP + targetPort: 16029 + name: port-16029 + selector: + app: ssv-node-29 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-29 + name: ssv-node-29 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-29 + template: + metadata: + labels: + app: ssv-node-29 + spec: + containers: + - name: ssv-node-29 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12029 + name: port-12029 + protocol: UDP + hostPort: 12029 + - containerPort: 13029 + name: port-13029 + hostPort: 13029 + - containerPort: 15029 + name: port-15029 + hostPort: 15029 + - containerPort: 16029 + name: port-16029 + hostPort: 16029 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15029" + - name: SSV_API_PORT + value: "16029" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-29 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-29-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-29 + persistentVolumeClaim: + claimName: ssv-node-29 + - name: ssv-node-29-cm + configMap: + name: ssv-node-29-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-3-deployment.yml b/.k8/hetzner-stage/ssv-node-3-deployment.yml new file mode 100644 index 0000000000..1fe286bfce --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-3-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-3-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-3 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12003 + protocol: UDP + targetPort: 12003 + name: port-12003 + - port: 13003 + protocol: TCP + targetPort: 13003 + name: port-13003 + - port: 15003 + protocol: TCP + targetPort: 15003 + name: metrics + - port: 16003 + protocol: TCP + targetPort: 16003 + name: port-16003 + selector: + app: ssv-node-3 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-3 + name: ssv-node-3 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-3 + template: + metadata: + labels: + app: ssv-node-3 + spec: + containers: + - name: ssv-node-3 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12003 + name: port-12003 + protocol: UDP + hostPort: 12003 + - containerPort: 13003 + name: port-13003 + hostPort: 13003 + - containerPort: 15003 + name: port-15003 + hostPort: 15003 + - containerPort: 16003 + name: port-16003 + hostPort: 16003 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15003" + - name: SSV_API_PORT + value: "16003" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-3 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-3-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-3 + persistentVolumeClaim: + claimName: ssv-node-3 + - name: ssv-node-3-cm + configMap: + name: ssv-node-3-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-30-deployment.yml b/.k8/hetzner-stage/ssv-node-30-deployment.yml new file mode 100644 index 0000000000..eed6293f41 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-30-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-30-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-30 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12030 + protocol: UDP + targetPort: 12030 + name: port-12030 + - port: 13030 + protocol: TCP + targetPort: 13030 + name: port-13030 + - port: 15030 + protocol: TCP + targetPort: 15030 + name: metrics + - port: 16030 + protocol: TCP + targetPort: 16030 + name: port-16030 + selector: + app: ssv-node-30 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-30 + name: ssv-node-30 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-30 + template: + metadata: + labels: + app: ssv-node-30 + spec: + containers: + - name: ssv-node-30 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12030 + name: port-12030 + protocol: UDP + hostPort: 12030 + - containerPort: 13030 + name: port-13030 + hostPort: 13030 + - containerPort: 15030 + name: port-15030 + hostPort: 15030 + - containerPort: 16030 + name: port-16030 + hostPort: 16030 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15030" + - name: SSV_API_PORT + value: "16030" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-30 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-30-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-30 + persistentVolumeClaim: + claimName: ssv-node-30 + - name: ssv-node-30-cm + configMap: + name: ssv-node-30-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-31-deployment.yml b/.k8/hetzner-stage/ssv-node-31-deployment.yml new file mode 100644 index 0000000000..decc10d037 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-31-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-31-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-31 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12031 + protocol: UDP + targetPort: 12031 + name: port-12031 + - port: 13031 + protocol: TCP + targetPort: 13031 + name: port-13031 + - port: 15031 + protocol: TCP + targetPort: 15031 + name: metrics + - port: 16031 + protocol: TCP + targetPort: 16031 + name: port-16031 + selector: + app: ssv-node-31 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-31 + name: ssv-node-31 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-31 + template: + metadata: + labels: + app: ssv-node-31 + spec: + containers: + - name: ssv-node-31 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12031 + name: port-12031 + protocol: UDP + hostPort: 12031 + - containerPort: 13031 + name: port-13031 + hostPort: 13031 + - containerPort: 15031 + name: port-15031 + hostPort: 15031 + - containerPort: 16031 + name: port-16031 + hostPort: 16031 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15031" + - name: SSV_API_PORT + value: "16031" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-31 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-31-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-31 + persistentVolumeClaim: + claimName: ssv-node-31 + - name: ssv-node-31-cm + configMap: + name: ssv-node-31-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-32-deployment.yml b/.k8/hetzner-stage/ssv-node-32-deployment.yml new file mode 100644 index 0000000000..32dcbc1587 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-32-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-32-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-32 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12032 + protocol: UDP + targetPort: 12032 + name: port-12032 + - port: 13032 + protocol: TCP + targetPort: 13032 + name: port-13032 + - port: 15032 + protocol: TCP + targetPort: 15032 + name: metrics + - port: 16032 + protocol: TCP + targetPort: 16032 + name: port-16032 + selector: + app: ssv-node-32 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-32 + name: ssv-node-32 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-32 + template: + metadata: + labels: + app: ssv-node-32 + spec: + containers: + - name: ssv-node-32 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12032 + name: port-12032 + protocol: UDP + hostPort: 12032 + - containerPort: 13032 + name: port-13032 + hostPort: 13032 + - containerPort: 15032 + name: port-15032 + hostPort: 15032 + - containerPort: 16032 + name: port-16032 + hostPort: 16032 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15032" + - name: SSV_API_PORT + value: "16032" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-32 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-32-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-32 + persistentVolumeClaim: + claimName: ssv-node-32 + - name: ssv-node-32-cm + configMap: + name: ssv-node-32-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-33-deployment.yml b/.k8/hetzner-stage/ssv-node-33-deployment.yml new file mode 100644 index 0000000000..7bb8ee072e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-33-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-33-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-33 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12033 + protocol: UDP + targetPort: 12033 + name: port-12033 + - port: 13033 + protocol: TCP + targetPort: 13033 + name: port-13033 + - port: 15033 + protocol: TCP + targetPort: 15033 + name: metrics + - port: 16033 + protocol: TCP + targetPort: 16033 + name: port-16033 + selector: + app: ssv-node-33 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-33 + name: ssv-node-33 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-33 + template: + metadata: + labels: + app: ssv-node-33 + spec: + containers: + - name: ssv-node-33 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12033 + name: port-12033 + protocol: UDP + hostPort: 12033 + - containerPort: 13033 + name: port-13033 + hostPort: 13033 + - containerPort: 15033 + name: port-15033 + hostPort: 15033 + - containerPort: 16033 + name: port-16033 + hostPort: 16033 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15033" + - name: SSV_API_PORT + value: "16033" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-33 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-33-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-33 + persistentVolumeClaim: + claimName: ssv-node-33 + - name: ssv-node-33-cm + configMap: + name: ssv-node-33-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-34-deployment.yml b/.k8/hetzner-stage/ssv-node-34-deployment.yml new file mode 100644 index 0000000000..5ec7a5c1b6 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-34-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-34-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-34 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12034 + protocol: UDP + targetPort: 12034 + name: port-12034 + - port: 13034 + protocol: TCP + targetPort: 13034 + name: port-13034 + - port: 15034 + protocol: TCP + targetPort: 15034 + name: metrics + - port: 16034 + protocol: TCP + targetPort: 16034 + name: port-16034 + selector: + app: ssv-node-34 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-34 + name: ssv-node-34 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-34 + template: + metadata: + labels: + app: ssv-node-34 + spec: + containers: + - name: ssv-node-34 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12034 + name: port-12034 + protocol: UDP + hostPort: 12034 + - containerPort: 13034 + name: port-13034 + hostPort: 13034 + - containerPort: 15034 + name: port-15034 + hostPort: 15034 + - containerPort: 16034 + name: port-16034 + hostPort: 16034 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15034" + - name: SSV_API_PORT + value: "16034" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-34 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-34-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-34 + persistentVolumeClaim: + claimName: ssv-node-34 + - name: ssv-node-34-cm + configMap: + name: ssv-node-34-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-35-deployment.yml b/.k8/hetzner-stage/ssv-node-35-deployment.yml new file mode 100644 index 0000000000..6430c698f5 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-35-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-35-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-35 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12035 + protocol: UDP + targetPort: 12035 + name: port-12035 + - port: 13035 + protocol: TCP + targetPort: 13035 + name: port-13035 + - port: 15035 + protocol: TCP + targetPort: 15035 + name: metrics + - port: 16035 + protocol: TCP + targetPort: 16035 + name: port-16035 + selector: + app: ssv-node-35 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-35 + name: ssv-node-35 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-35 + template: + metadata: + labels: + app: ssv-node-35 + spec: + containers: + - name: ssv-node-35 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12035 + name: port-12035 + protocol: UDP + hostPort: 12035 + - containerPort: 13035 + name: port-13035 + hostPort: 13035 + - containerPort: 15035 + name: port-15035 + hostPort: 15035 + - containerPort: 16035 + name: port-16035 + hostPort: 16035 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15035" + - name: SSV_API_PORT + value: "16035" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-35 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-35-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-35 + persistentVolumeClaim: + claimName: ssv-node-35 + - name: ssv-node-35-cm + configMap: + name: ssv-node-35-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-36-deployment.yml b/.k8/hetzner-stage/ssv-node-36-deployment.yml new file mode 100644 index 0000000000..a91c4fd23a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-36-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-36-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-36 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12036 + protocol: UDP + targetPort: 12036 + name: port-12036 + - port: 13036 + protocol: TCP + targetPort: 13036 + name: port-13036 + - port: 15036 + protocol: TCP + targetPort: 15036 + name: metrics + - port: 16036 + protocol: TCP + targetPort: 16036 + name: port-16036 + selector: + app: ssv-node-36 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-36 + name: ssv-node-36 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-36 + template: + metadata: + labels: + app: ssv-node-36 + spec: + containers: + - name: ssv-node-36 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12036 + name: port-12036 + protocol: UDP + hostPort: 12036 + - containerPort: 13036 + name: port-13036 + hostPort: 13036 + - containerPort: 15036 + name: port-15036 + hostPort: 15036 + - containerPort: 16036 + name: port-16036 + hostPort: 16036 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15036" + - name: SSV_API_PORT + value: "16036" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-36 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-36-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-36 + persistentVolumeClaim: + claimName: ssv-node-36 + - name: ssv-node-36-cm + configMap: + name: ssv-node-36-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-37-deployment.yml b/.k8/hetzner-stage/ssv-node-37-deployment.yml new file mode 100644 index 0000000000..ef6fe88f03 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-37-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-37-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-37 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12037 + protocol: UDP + targetPort: 12037 + name: port-12037 + - port: 13037 + protocol: TCP + targetPort: 13037 + name: port-13037 + - port: 15037 + protocol: TCP + targetPort: 15037 + name: metrics + - port: 16037 + protocol: TCP + targetPort: 16037 + name: port-16037 + selector: + app: ssv-node-37 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-37 + name: ssv-node-37 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-37 + template: + metadata: + labels: + app: ssv-node-37 + spec: + containers: + - name: ssv-node-37 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12037 + name: port-12037 + protocol: UDP + hostPort: 12037 + - containerPort: 13037 + name: port-13037 + hostPort: 13037 + - containerPort: 15037 + name: port-15037 + hostPort: 15037 + - containerPort: 16037 + name: port-16037 + hostPort: 16037 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15037" + - name: SSV_API_PORT + value: "16037" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-37 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-37-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-37 + persistentVolumeClaim: + claimName: ssv-node-37 + - name: ssv-node-37-cm + configMap: + name: ssv-node-37-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-38-deployment.yml b/.k8/hetzner-stage/ssv-node-38-deployment.yml new file mode 100644 index 0000000000..c2949533e8 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-38-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-38-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-38 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12038 + protocol: UDP + targetPort: 12038 + name: port-12038 + - port: 13038 + protocol: TCP + targetPort: 13038 + name: port-13038 + - port: 15038 + protocol: TCP + targetPort: 15038 + name: metrics + - port: 16038 + protocol: TCP + targetPort: 16038 + name: port-16038 + selector: + app: ssv-node-38 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-38 + name: ssv-node-38 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-38 + template: + metadata: + labels: + app: ssv-node-38 + spec: + containers: + - name: ssv-node-38 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12038 + name: port-12038 + protocol: UDP + hostPort: 12038 + - containerPort: 13038 + name: port-13038 + hostPort: 13038 + - containerPort: 15038 + name: port-15038 + hostPort: 15038 + - containerPort: 16038 + name: port-16038 + hostPort: 16038 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15038" + - name: SSV_API_PORT + value: "16038" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-38 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-38-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-38 + persistentVolumeClaim: + claimName: ssv-node-38 + - name: ssv-node-38-cm + configMap: + name: ssv-node-38-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-39-deployment.yml b/.k8/hetzner-stage/ssv-node-39-deployment.yml new file mode 100644 index 0000000000..9b5e0dd6d5 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-39-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-39-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-39 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12039 + protocol: UDP + targetPort: 12039 + name: port-12039 + - port: 13039 + protocol: TCP + targetPort: 13039 + name: port-13039 + - port: 15039 + protocol: TCP + targetPort: 15039 + name: metrics + - port: 16039 + protocol: TCP + targetPort: 16039 + name: port-16039 + selector: + app: ssv-node-39 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-39 + name: ssv-node-39 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-39 + template: + metadata: + labels: + app: ssv-node-39 + spec: + containers: + - name: ssv-node-39 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12039 + name: port-12039 + protocol: UDP + hostPort: 12039 + - containerPort: 13039 + name: port-13039 + hostPort: 13039 + - containerPort: 15039 + name: port-15039 + hostPort: 15039 + - containerPort: 16039 + name: port-16039 + hostPort: 16039 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15039" + - name: SSV_API_PORT + value: "16039" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-39 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-39-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-39 + persistentVolumeClaim: + claimName: ssv-node-39 + - name: ssv-node-39-cm + configMap: + name: ssv-node-39-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-4-deployment.yml b/.k8/hetzner-stage/ssv-node-4-deployment.yml new file mode 100644 index 0000000000..e14bf0186d --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-4-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-4-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-4 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12004 + protocol: UDP + targetPort: 12004 + name: port-12004 + - port: 13004 + protocol: TCP + targetPort: 13004 + name: port-13004 + - port: 15004 + protocol: TCP + targetPort: 15004 + name: metrics + - port: 16004 + protocol: TCP + targetPort: 16004 + name: port-16004 + selector: + app: ssv-node-4 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-4 + name: ssv-node-4 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-4 + template: + metadata: + labels: + app: ssv-node-4 + spec: + containers: + - name: ssv-node-4 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12004 + name: port-12004 + protocol: UDP + hostPort: 12004 + - containerPort: 13004 + name: port-13004 + hostPort: 13004 + - containerPort: 15004 + name: port-15004 + hostPort: 15004 + - containerPort: 16004 + name: port-16004 + hostPort: 16004 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15004" + - name: SSV_API_PORT + value: "16004" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-4 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-4-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-4 + persistentVolumeClaim: + claimName: ssv-node-4 + - name: ssv-node-4-cm + configMap: + name: ssv-node-4-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-40-deployment.yml b/.k8/hetzner-stage/ssv-node-40-deployment.yml new file mode 100644 index 0000000000..ab0f8f974a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-40-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-40-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-40 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12040 + protocol: UDP + targetPort: 12040 + name: port-12040 + - port: 13040 + protocol: TCP + targetPort: 13040 + name: port-13040 + - port: 15040 + protocol: TCP + targetPort: 15040 + name: metrics + - port: 16040 + protocol: TCP + targetPort: 16040 + name: port-16040 + selector: + app: ssv-node-40 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-40 + name: ssv-node-40 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-40 + template: + metadata: + labels: + app: ssv-node-40 + spec: + containers: + - name: ssv-node-40 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12040 + name: port-12040 + protocol: UDP + hostPort: 12040 + - containerPort: 13040 + name: port-13040 + hostPort: 13040 + - containerPort: 15040 + name: port-15040 + hostPort: 15040 + - containerPort: 16040 + name: port-16040 + hostPort: 16040 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15040" + - name: SSV_API_PORT + value: "16040" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-40 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-40-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-40 + persistentVolumeClaim: + claimName: ssv-node-40 + - name: ssv-node-40-cm + configMap: + name: ssv-node-40-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-41-deployment.yml b/.k8/hetzner-stage/ssv-node-41-deployment.yml new file mode 100644 index 0000000000..cc177afcf1 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-41-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-41-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-41 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12041 + protocol: UDP + targetPort: 12041 + name: port-12041 + - port: 13041 + protocol: TCP + targetPort: 13041 + name: port-13041 + - port: 15041 + protocol: TCP + targetPort: 15041 + name: metrics + - port: 16041 + protocol: TCP + targetPort: 16041 + name: port-16041 + selector: + app: ssv-node-41 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-41 + name: ssv-node-41 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-41 + template: + metadata: + labels: + app: ssv-node-41 + spec: + containers: + - name: ssv-node-41 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12041 + name: port-12041 + protocol: UDP + hostPort: 12041 + - containerPort: 13041 + name: port-13041 + hostPort: 13041 + - containerPort: 15041 + name: port-15041 + hostPort: 15041 + - containerPort: 16041 + name: port-16041 + hostPort: 16041 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15041" + - name: SSV_API_PORT + value: "16041" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-41 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-41-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-41 + persistentVolumeClaim: + claimName: ssv-node-41 + - name: ssv-node-41-cm + configMap: + name: ssv-node-41-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-42-deployment.yml b/.k8/hetzner-stage/ssv-node-42-deployment.yml new file mode 100644 index 0000000000..635b268042 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-42-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-42-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-42 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12042 + protocol: UDP + targetPort: 12042 + name: port-12042 + - port: 13042 + protocol: TCP + targetPort: 13042 + name: port-13042 + - port: 15042 + protocol: TCP + targetPort: 15042 + name: metrics + - port: 16042 + protocol: TCP + targetPort: 16042 + name: port-16042 + selector: + app: ssv-node-42 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-42 + name: ssv-node-42 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-42 + template: + metadata: + labels: + app: ssv-node-42 + spec: + containers: + - name: ssv-node-42 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12042 + name: port-12042 + protocol: UDP + hostPort: 12042 + - containerPort: 13042 + name: port-13042 + hostPort: 13042 + - containerPort: 15042 + name: port-15042 + hostPort: 15042 + - containerPort: 16042 + name: port-16042 + hostPort: 16042 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15042" + - name: SSV_API_PORT + value: "16042" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-42 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-42-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-42 + persistentVolumeClaim: + claimName: ssv-node-42 + - name: ssv-node-42-cm + configMap: + name: ssv-node-42-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-43-deployment.yml b/.k8/hetzner-stage/ssv-node-43-deployment.yml new file mode 100644 index 0000000000..4731455412 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-43-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-43-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-43 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12043 + protocol: UDP + targetPort: 12043 + name: port-12043 + - port: 13043 + protocol: TCP + targetPort: 13043 + name: port-13043 + - port: 15043 + protocol: TCP + targetPort: 15043 + name: metrics + - port: 16043 + protocol: TCP + targetPort: 16043 + name: port-16043 + selector: + app: ssv-node-43 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-43 + name: ssv-node-43 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-43 + template: + metadata: + labels: + app: ssv-node-43 + spec: + containers: + - name: ssv-node-43 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12043 + name: port-12043 + protocol: UDP + hostPort: 12043 + - containerPort: 13043 + name: port-13043 + hostPort: 13043 + - containerPort: 15043 + name: port-15043 + hostPort: 15043 + - containerPort: 16043 + name: port-16043 + hostPort: 16043 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15043" + - name: SSV_API_PORT + value: "16043" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-43 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-43-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-43 + persistentVolumeClaim: + claimName: ssv-node-43 + - name: ssv-node-43-cm + configMap: + name: ssv-node-43-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-44-deployment.yml b/.k8/hetzner-stage/ssv-node-44-deployment.yml new file mode 100644 index 0000000000..b9b8b0c5a4 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-44-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-44-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-44 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12044 + protocol: UDP + targetPort: 12044 + name: port-12044 + - port: 13044 + protocol: TCP + targetPort: 13044 + name: port-13044 + - port: 15044 + protocol: TCP + targetPort: 15044 + name: metrics + - port: 16044 + protocol: TCP + targetPort: 16044 + name: port-16044 + selector: + app: ssv-node-44 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-44 + name: ssv-node-44 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-44 + template: + metadata: + labels: + app: ssv-node-44 + spec: + containers: + - name: ssv-node-44 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12044 + name: port-12044 + protocol: UDP + hostPort: 12044 + - containerPort: 13044 + name: port-13044 + hostPort: 13044 + - containerPort: 15044 + name: port-15044 + hostPort: 15044 + - containerPort: 16044 + name: port-16044 + hostPort: 16044 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15044" + - name: SSV_API_PORT + value: "16044" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-44 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-44-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-44 + persistentVolumeClaim: + claimName: ssv-node-44 + - name: ssv-node-44-cm + configMap: + name: ssv-node-44-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-45-deployment.yml b/.k8/hetzner-stage/ssv-node-45-deployment.yml new file mode 100644 index 0000000000..6636204199 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-45-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-45-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-45 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12045 + protocol: UDP + targetPort: 12045 + name: port-12045 + - port: 13045 + protocol: TCP + targetPort: 13045 + name: port-13045 + - port: 15045 + protocol: TCP + targetPort: 15045 + name: metrics + - port: 16045 + protocol: TCP + targetPort: 16045 + name: port-16045 + selector: + app: ssv-node-45 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-45 + name: ssv-node-45 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-45 + template: + metadata: + labels: + app: ssv-node-45 + spec: + containers: + - name: ssv-node-45 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12045 + name: port-12045 + protocol: UDP + hostPort: 12045 + - containerPort: 13045 + name: port-13045 + hostPort: 13045 + - containerPort: 15045 + name: port-15045 + hostPort: 15045 + - containerPort: 16045 + name: port-16045 + hostPort: 16045 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15045" + - name: SSV_API_PORT + value: "16045" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-45 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-45-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-45 + persistentVolumeClaim: + claimName: ssv-node-45 + - name: ssv-node-45-cm + configMap: + name: ssv-node-45-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-46-deployment.yml b/.k8/hetzner-stage/ssv-node-46-deployment.yml new file mode 100644 index 0000000000..9abe08db3b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-46-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-46-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-46 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12046 + protocol: UDP + targetPort: 12046 + name: port-12046 + - port: 13046 + protocol: TCP + targetPort: 13046 + name: port-13046 + - port: 15046 + protocol: TCP + targetPort: 15046 + name: metrics + - port: 16046 + protocol: TCP + targetPort: 16046 + name: port-16046 + selector: + app: ssv-node-46 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-46 + name: ssv-node-46 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-46 + template: + metadata: + labels: + app: ssv-node-46 + spec: + containers: + - name: ssv-node-46 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12046 + name: port-12046 + protocol: UDP + hostPort: 12046 + - containerPort: 13046 + name: port-13046 + hostPort: 13046 + - containerPort: 15046 + name: port-15046 + hostPort: 15046 + - containerPort: 16046 + name: port-16046 + hostPort: 16046 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15046" + - name: SSV_API_PORT + value: "16046" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-46 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-46-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-46 + persistentVolumeClaim: + claimName: ssv-node-46 + - name: ssv-node-46-cm + configMap: + name: ssv-node-46-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-47-deployment.yml b/.k8/hetzner-stage/ssv-node-47-deployment.yml new file mode 100644 index 0000000000..89964d30fb --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-47-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-47-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-47 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12047 + protocol: UDP + targetPort: 12047 + name: port-12047 + - port: 13047 + protocol: TCP + targetPort: 13047 + name: port-13047 + - port: 15047 + protocol: TCP + targetPort: 15047 + name: metrics + - port: 16047 + protocol: TCP + targetPort: 16047 + name: port-16047 + selector: + app: ssv-node-47 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-47 + name: ssv-node-47 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-47 + template: + metadata: + labels: + app: ssv-node-47 + spec: + containers: + - name: ssv-node-47 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12047 + name: port-12047 + protocol: UDP + hostPort: 12047 + - containerPort: 13047 + name: port-13047 + hostPort: 13047 + - containerPort: 15047 + name: port-15047 + hostPort: 15047 + - containerPort: 16047 + name: port-16047 + hostPort: 16047 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15047" + - name: SSV_API_PORT + value: "16047" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-47 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-47-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-47 + persistentVolumeClaim: + claimName: ssv-node-47 + - name: ssv-node-47-cm + configMap: + name: ssv-node-47-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-48-deployment.yml b/.k8/hetzner-stage/ssv-node-48-deployment.yml new file mode 100644 index 0000000000..843835dd40 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-48-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-48-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-48 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12048 + protocol: UDP + targetPort: 12048 + name: port-12048 + - port: 13048 + protocol: TCP + targetPort: 13048 + name: port-13048 + - port: 15048 + protocol: TCP + targetPort: 15048 + name: metrics + - port: 16048 + protocol: TCP + targetPort: 16048 + name: port-16048 + selector: + app: ssv-node-48 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-48 + name: ssv-node-48 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-48 + template: + metadata: + labels: + app: ssv-node-48 + spec: + containers: + - name: ssv-node-48 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12048 + name: port-12048 + protocol: UDP + hostPort: 12048 + - containerPort: 13048 + name: port-13048 + hostPort: 13048 + - containerPort: 15048 + name: port-15048 + hostPort: 15048 + - containerPort: 16048 + name: port-16048 + hostPort: 16048 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15048" + - name: SSV_API_PORT + value: "16048" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-48 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-48-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-48 + persistentVolumeClaim: + claimName: ssv-node-48 + - name: ssv-node-48-cm + configMap: + name: ssv-node-48-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-49-deployment.yml b/.k8/hetzner-stage/ssv-node-49-deployment.yml new file mode 100644 index 0000000000..5a557185dc --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-49-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-49-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-49 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12049 + protocol: UDP + targetPort: 12049 + name: port-12049 + - port: 13049 + protocol: TCP + targetPort: 13049 + name: port-13049 + - port: 15049 + protocol: TCP + targetPort: 15049 + name: metrics + - port: 16049 + protocol: TCP + targetPort: 16049 + name: port-16049 + selector: + app: ssv-node-49 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-49 + name: ssv-node-49 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-49 + template: + metadata: + labels: + app: ssv-node-49 + spec: + containers: + - name: ssv-node-49 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12049 + name: port-12049 + protocol: UDP + hostPort: 12049 + - containerPort: 13049 + name: port-13049 + hostPort: 13049 + - containerPort: 15049 + name: port-15049 + hostPort: 15049 + - containerPort: 16049 + name: port-16049 + hostPort: 16049 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15049" + - name: SSV_API_PORT + value: "16049" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-49 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-49-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-49 + persistentVolumeClaim: + claimName: ssv-node-49 + - name: ssv-node-49-cm + configMap: + name: ssv-node-49-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-5-deployment.yml b/.k8/hetzner-stage/ssv-node-5-deployment.yml new file mode 100644 index 0000000000..94d184dec0 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-5-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-5-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-5 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12005 + protocol: UDP + targetPort: 12005 + name: port-12005 + - port: 13005 + protocol: TCP + targetPort: 13005 + name: port-13005 + - port: 15005 + protocol: TCP + targetPort: 15005 + name: metrics + - port: 16005 + protocol: TCP + targetPort: 16005 + name: port-16005 + selector: + app: ssv-node-5 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-5 + name: ssv-node-5 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-5 + template: + metadata: + labels: + app: ssv-node-5 + spec: + containers: + - name: ssv-node-5 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12005 + name: port-12005 + protocol: UDP + hostPort: 12005 + - containerPort: 13005 + name: port-13005 + hostPort: 13005 + - containerPort: 15005 + name: port-15005 + hostPort: 15005 + - containerPort: 16005 + name: port-16005 + hostPort: 16005 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15005" + - name: SSV_API_PORT + value: "16005" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-5 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-5-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-5 + persistentVolumeClaim: + claimName: ssv-node-5 + - name: ssv-node-5-cm + configMap: + name: ssv-node-5-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-50-deployment.yml b/.k8/hetzner-stage/ssv-node-50-deployment.yml new file mode 100644 index 0000000000..0099320434 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-50-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-50-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-50 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12050 + protocol: UDP + targetPort: 12050 + name: port-12050 + - port: 13050 + protocol: TCP + targetPort: 13050 + name: port-13050 + - port: 15050 + protocol: TCP + targetPort: 15050 + name: metrics + - port: 16050 + protocol: TCP + targetPort: 16050 + name: port-16050 + selector: + app: ssv-node-50 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-50 + name: ssv-node-50 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-50 + template: + metadata: + labels: + app: ssv-node-50 + spec: + containers: + - name: ssv-node-50 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12050 + name: port-12050 + protocol: UDP + hostPort: 12050 + - containerPort: 13050 + name: port-13050 + hostPort: 13050 + - containerPort: 15050 + name: port-15050 + hostPort: 15050 + - containerPort: 16050 + name: port-16050 + hostPort: 16050 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15050" + - name: SSV_API_PORT + value: "16050" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-50 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-50-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-50 + persistentVolumeClaim: + claimName: ssv-node-50 + - name: ssv-node-50-cm + configMap: + name: ssv-node-50-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-51-deployment.yml b/.k8/hetzner-stage/ssv-node-51-deployment.yml new file mode 100644 index 0000000000..7933ca7218 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-51-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-51-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-51 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12051 + protocol: UDP + targetPort: 12051 + name: port-12051 + - port: 13051 + protocol: TCP + targetPort: 13051 + name: port-13051 + - port: 15051 + protocol: TCP + targetPort: 15051 + name: metrics + - port: 16051 + protocol: TCP + targetPort: 16051 + name: port-16051 + selector: + app: ssv-node-51 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-51 + name: ssv-node-51 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-51 + template: + metadata: + labels: + app: ssv-node-51 + spec: + containers: + - name: ssv-node-51 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12051 + name: port-12051 + protocol: UDP + hostPort: 12051 + - containerPort: 13051 + name: port-13051 + hostPort: 13051 + - containerPort: 15051 + name: port-15051 + hostPort: 15051 + - containerPort: 16051 + name: port-16051 + hostPort: 16051 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15051" + - name: SSV_API_PORT + value: "16051" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-51 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-51-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-51 + persistentVolumeClaim: + claimName: ssv-node-51 + - name: ssv-node-51-cm + configMap: + name: ssv-node-51-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-52-deployment.yml b/.k8/hetzner-stage/ssv-node-52-deployment.yml new file mode 100644 index 0000000000..46a23039c7 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-52-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-52-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-52 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12052 + protocol: UDP + targetPort: 12052 + name: port-12052 + - port: 13052 + protocol: TCP + targetPort: 13052 + name: port-13052 + - port: 15052 + protocol: TCP + targetPort: 15052 + name: metrics + - port: 16052 + protocol: TCP + targetPort: 16052 + name: port-16052 + selector: + app: ssv-node-52 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-52 + name: ssv-node-52 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-52 + template: + metadata: + labels: + app: ssv-node-52 + spec: + containers: + - name: ssv-node-52 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12052 + name: port-12052 + protocol: UDP + hostPort: 12052 + - containerPort: 13052 + name: port-13052 + hostPort: 13052 + - containerPort: 15052 + name: port-15052 + hostPort: 15052 + - containerPort: 16052 + name: port-16052 + hostPort: 16052 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15052" + - name: SSV_API_PORT + value: "16052" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-52 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-52-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-52 + persistentVolumeClaim: + claimName: ssv-node-52 + - name: ssv-node-52-cm + configMap: + name: ssv-node-52-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-53-deployment.yml b/.k8/hetzner-stage/ssv-node-53-deployment.yml new file mode 100644 index 0000000000..373b3fd9a5 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-53-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-53-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-53 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12053 + protocol: UDP + targetPort: 12053 + name: port-12053 + - port: 13053 + protocol: TCP + targetPort: 13053 + name: port-13053 + - port: 15053 + protocol: TCP + targetPort: 15053 + name: metrics + - port: 16053 + protocol: TCP + targetPort: 16053 + name: port-16053 + selector: + app: ssv-node-53 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-53 + name: ssv-node-53 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-53 + template: + metadata: + labels: + app: ssv-node-53 + spec: + containers: + - name: ssv-node-53 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12053 + name: port-12053 + protocol: UDP + hostPort: 12053 + - containerPort: 13053 + name: port-13053 + hostPort: 13053 + - containerPort: 15053 + name: port-15053 + hostPort: 15053 + - containerPort: 16053 + name: port-16053 + hostPort: 16053 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15053" + - name: SSV_API_PORT + value: "16053" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-53 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-53-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-53 + persistentVolumeClaim: + claimName: ssv-node-53 + - name: ssv-node-53-cm + configMap: + name: ssv-node-53-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-54-deployment.yml b/.k8/hetzner-stage/ssv-node-54-deployment.yml new file mode 100644 index 0000000000..fa81104af3 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-54-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-54-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-54 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12054 + protocol: UDP + targetPort: 12054 + name: port-12054 + - port: 13054 + protocol: TCP + targetPort: 13054 + name: port-13054 + - port: 15054 + protocol: TCP + targetPort: 15054 + name: metrics + - port: 16054 + protocol: TCP + targetPort: 16054 + name: port-16054 + selector: + app: ssv-node-54 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-54 + name: ssv-node-54 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-54 + template: + metadata: + labels: + app: ssv-node-54 + spec: + containers: + - name: ssv-node-54 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12054 + name: port-12054 + protocol: UDP + hostPort: 12054 + - containerPort: 13054 + name: port-13054 + hostPort: 13054 + - containerPort: 15054 + name: port-15054 + hostPort: 15054 + - containerPort: 16054 + name: port-16054 + hostPort: 16054 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15054" + - name: SSV_API_PORT + value: "16054" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-54 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-54-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-54 + persistentVolumeClaim: + claimName: ssv-node-54 + - name: ssv-node-54-cm + configMap: + name: ssv-node-54-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-55-deployment.yml b/.k8/hetzner-stage/ssv-node-55-deployment.yml new file mode 100644 index 0000000000..0c5f96d861 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-55-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-55-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-55 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12055 + protocol: UDP + targetPort: 12055 + name: port-12055 + - port: 13055 + protocol: TCP + targetPort: 13055 + name: port-13055 + - port: 15055 + protocol: TCP + targetPort: 15055 + name: metrics + - port: 16055 + protocol: TCP + targetPort: 16055 + name: port-16055 + selector: + app: ssv-node-55 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-55 + name: ssv-node-55 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-55 + template: + metadata: + labels: + app: ssv-node-55 + spec: + containers: + - name: ssv-node-55 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12055 + name: port-12055 + protocol: UDP + hostPort: 12055 + - containerPort: 13055 + name: port-13055 + hostPort: 13055 + - containerPort: 15055 + name: port-15055 + hostPort: 15055 + - containerPort: 16055 + name: port-16055 + hostPort: 16055 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15055" + - name: SSV_API_PORT + value: "16055" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-55 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-55-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-55 + persistentVolumeClaim: + claimName: ssv-node-55 + - name: ssv-node-55-cm + configMap: + name: ssv-node-55-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-56-deployment.yml b/.k8/hetzner-stage/ssv-node-56-deployment.yml new file mode 100644 index 0000000000..4980e56786 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-56-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-56-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-56 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12056 + protocol: UDP + targetPort: 12056 + name: port-12056 + - port: 13056 + protocol: TCP + targetPort: 13056 + name: port-13056 + - port: 15056 + protocol: TCP + targetPort: 15056 + name: metrics + - port: 16056 + protocol: TCP + targetPort: 16056 + name: port-16056 + selector: + app: ssv-node-56 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-56 + name: ssv-node-56 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-56 + template: + metadata: + labels: + app: ssv-node-56 + spec: + containers: + - name: ssv-node-56 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12056 + name: port-12056 + protocol: UDP + hostPort: 12056 + - containerPort: 13056 + name: port-13056 + hostPort: 13056 + - containerPort: 15056 + name: port-15056 + hostPort: 15056 + - containerPort: 16056 + name: port-16056 + hostPort: 16056 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15056" + - name: SSV_API_PORT + value: "16056" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-56 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-56-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-56 + persistentVolumeClaim: + claimName: ssv-node-56 + - name: ssv-node-56-cm + configMap: + name: ssv-node-56-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-57-deployment.yml b/.k8/hetzner-stage/ssv-node-57-deployment.yml new file mode 100644 index 0000000000..6fea9bd5f3 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-57-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-57-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-57 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12057 + protocol: UDP + targetPort: 12057 + name: port-12057 + - port: 13057 + protocol: TCP + targetPort: 13057 + name: port-13057 + - port: 15057 + protocol: TCP + targetPort: 15057 + name: metrics + - port: 16057 + protocol: TCP + targetPort: 16057 + name: port-16057 + selector: + app: ssv-node-57 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-57 + name: ssv-node-57 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-57 + template: + metadata: + labels: + app: ssv-node-57 + spec: + containers: + - name: ssv-node-57 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12057 + name: port-12057 + protocol: UDP + hostPort: 12057 + - containerPort: 13057 + name: port-13057 + hostPort: 13057 + - containerPort: 15057 + name: port-15057 + hostPort: 15057 + - containerPort: 16057 + name: port-16057 + hostPort: 16057 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15057" + - name: SSV_API_PORT + value: "16057" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-57 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-57-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-57 + persistentVolumeClaim: + claimName: ssv-node-57 + - name: ssv-node-57-cm + configMap: + name: ssv-node-57-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml new file mode 100644 index 0000000000..ba1175da79 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-58-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-58 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12058 + protocol: UDP + targetPort: 12058 + name: port-12058 + - port: 13058 + protocol: TCP + targetPort: 13058 + name: port-13058 + - port: 15058 + protocol: TCP + targetPort: 15058 + name: metrics + - port: 16058 + protocol: TCP + targetPort: 16058 + name: port-16058 + selector: + app: ssv-node-58 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-58 + name: ssv-node-58 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-58 + template: + metadata: + labels: + app: ssv-node-58 + spec: + containers: + - name: ssv-node-58 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12058 + name: port-12058 + protocol: UDP + hostPort: 12058 + - containerPort: 13058 + name: port-13058 + hostPort: 13058 + - containerPort: 15058 + name: port-15058 + hostPort: 15058 + - containerPort: 16058 + name: port-16058 + hostPort: 16058 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15058" + - name: SSV_API_PORT + value: "16058" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-58 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-58-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-58 + persistentVolumeClaim: + claimName: ssv-node-58 + - name: ssv-node-58-cm + configMap: + name: ssv-node-58-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-59-deployment.yml b/.k8/hetzner-stage/ssv-node-59-deployment.yml new file mode 100644 index 0000000000..2232660448 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-59-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-59-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-59 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12059 + protocol: UDP + targetPort: 12059 + name: port-12059 + - port: 13059 + protocol: TCP + targetPort: 13059 + name: port-13059 + - port: 15059 + protocol: TCP + targetPort: 15059 + name: metrics + - port: 16059 + protocol: TCP + targetPort: 16059 + name: port-16059 + selector: + app: ssv-node-59 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-59 + name: ssv-node-59 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-59 + template: + metadata: + labels: + app: ssv-node-59 + spec: + containers: + - name: ssv-node-59 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12059 + name: port-12059 + protocol: UDP + hostPort: 12059 + - containerPort: 13059 + name: port-13059 + hostPort: 13059 + - containerPort: 15059 + name: port-15059 + hostPort: 15059 + - containerPort: 16059 + name: port-16059 + hostPort: 16059 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15059" + - name: SSV_API_PORT + value: "16059" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-59 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-59-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-59 + persistentVolumeClaim: + claimName: ssv-node-59 + - name: ssv-node-59-cm + configMap: + name: ssv-node-59-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-6-deployment.yml b/.k8/hetzner-stage/ssv-node-6-deployment.yml new file mode 100644 index 0000000000..945c0a7779 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-6-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-6-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-6 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12006 + protocol: UDP + targetPort: 12006 + name: port-12006 + - port: 13006 + protocol: TCP + targetPort: 13006 + name: port-13006 + - port: 15006 + protocol: TCP + targetPort: 15006 + name: metrics + - port: 16006 + protocol: TCP + targetPort: 16006 + name: port-16006 + selector: + app: ssv-node-6 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-6 + name: ssv-node-6 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-6 + template: + metadata: + labels: + app: ssv-node-6 + spec: + containers: + - name: ssv-node-6 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12006 + name: port-12006 + protocol: UDP + hostPort: 12006 + - containerPort: 13006 + name: port-13006 + hostPort: 13006 + - containerPort: 15006 + name: port-15006 + hostPort: 15006 + - containerPort: 16006 + name: port-16006 + hostPort: 16006 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15006" + - name: SSV_API_PORT + value: "16006" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-6 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-6-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-6 + persistentVolumeClaim: + claimName: ssv-node-6 + - name: ssv-node-6-cm + configMap: + name: ssv-node-6-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-60-deployment.yml b/.k8/hetzner-stage/ssv-node-60-deployment.yml new file mode 100644 index 0000000000..28536a9fd9 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-60-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-60-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-60 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12060 + protocol: UDP + targetPort: 12060 + name: port-12060 + - port: 13060 + protocol: TCP + targetPort: 13060 + name: port-13060 + - port: 15060 + protocol: TCP + targetPort: 15060 + name: metrics + - port: 16060 + protocol: TCP + targetPort: 16060 + name: port-16060 + selector: + app: ssv-node-60 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-60 + name: ssv-node-60 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-60 + template: + metadata: + labels: + app: ssv-node-60 + spec: + containers: + - name: ssv-node-60 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12060 + name: port-12060 + protocol: UDP + hostPort: 12060 + - containerPort: 13060 + name: port-13060 + hostPort: 13060 + - containerPort: 15060 + name: port-15060 + hostPort: 15060 + - containerPort: 16060 + name: port-16060 + hostPort: 16060 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15060" + - name: SSV_API_PORT + value: "16060" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-60 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-60-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-60 + persistentVolumeClaim: + claimName: ssv-node-60 + - name: ssv-node-60-cm + configMap: + name: ssv-node-60-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-61-deployment.yml b/.k8/hetzner-stage/ssv-node-61-deployment.yml new file mode 100644 index 0000000000..a4802318f7 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-61-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-61-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-61 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12061 + protocol: UDP + targetPort: 12061 + name: port-12061 + - port: 13061 + protocol: TCP + targetPort: 13061 + name: port-13061 + - port: 15061 + protocol: TCP + targetPort: 15061 + name: metrics + - port: 16061 + protocol: TCP + targetPort: 16061 + name: port-16061 + selector: + app: ssv-node-61 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-61 + name: ssv-node-61 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-61 + template: + metadata: + labels: + app: ssv-node-61 + spec: + containers: + - name: ssv-node-61 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12061 + name: port-12061 + protocol: UDP + hostPort: 12061 + - containerPort: 13061 + name: port-13061 + hostPort: 13061 + - containerPort: 15061 + name: port-15061 + hostPort: 15061 + - containerPort: 16061 + name: port-16061 + hostPort: 16061 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15061" + - name: SSV_API_PORT + value: "16061" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-61 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-61-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-61 + persistentVolumeClaim: + claimName: ssv-node-61 + - name: ssv-node-61-cm + configMap: + name: ssv-node-61-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-62-deployment.yml b/.k8/hetzner-stage/ssv-node-62-deployment.yml new file mode 100644 index 0000000000..3ceb7303cf --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-62-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-62-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-62 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12062 + protocol: UDP + targetPort: 12062 + name: port-12062 + - port: 13062 + protocol: TCP + targetPort: 13062 + name: port-13062 + - port: 15062 + protocol: TCP + targetPort: 15062 + name: metrics + - port: 16062 + protocol: TCP + targetPort: 16062 + name: port-16062 + selector: + app: ssv-node-62 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-62 + name: ssv-node-62 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-62 + template: + metadata: + labels: + app: ssv-node-62 + spec: + containers: + - name: ssv-node-62 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12062 + name: port-12062 + protocol: UDP + hostPort: 12062 + - containerPort: 13062 + name: port-13062 + hostPort: 13062 + - containerPort: 15062 + name: port-15062 + hostPort: 15062 + - containerPort: 16062 + name: port-16062 + hostPort: 16062 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15062" + - name: SSV_API_PORT + value: "16062" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-62 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-62-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-62 + persistentVolumeClaim: + claimName: ssv-node-62 + - name: ssv-node-62-cm + configMap: + name: ssv-node-62-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-63-deployment.yml b/.k8/hetzner-stage/ssv-node-63-deployment.yml new file mode 100644 index 0000000000..e445668038 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-63-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-63-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-63 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12063 + protocol: UDP + targetPort: 12063 + name: port-12063 + - port: 13063 + protocol: TCP + targetPort: 13063 + name: port-13063 + - port: 15063 + protocol: TCP + targetPort: 15063 + name: metrics + - port: 16063 + protocol: TCP + targetPort: 16063 + name: port-16063 + selector: + app: ssv-node-63 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-63 + name: ssv-node-63 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-63 + template: + metadata: + labels: + app: ssv-node-63 + spec: + containers: + - name: ssv-node-63 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12063 + name: port-12063 + protocol: UDP + hostPort: 12063 + - containerPort: 13063 + name: port-13063 + hostPort: 13063 + - containerPort: 15063 + name: port-15063 + hostPort: 15063 + - containerPort: 16063 + name: port-16063 + hostPort: 16063 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15063" + - name: SSV_API_PORT + value: "16063" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-63 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-63-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-63 + persistentVolumeClaim: + claimName: ssv-node-63 + - name: ssv-node-63-cm + configMap: + name: ssv-node-63-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-64-deployment.yml b/.k8/hetzner-stage/ssv-node-64-deployment.yml new file mode 100644 index 0000000000..41622ca0be --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-64-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-64-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-64 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12064 + protocol: UDP + targetPort: 12064 + name: port-12064 + - port: 13064 + protocol: TCP + targetPort: 13064 + name: port-13064 + - port: 15064 + protocol: TCP + targetPort: 15064 + name: metrics + - port: 16064 + protocol: TCP + targetPort: 16064 + name: port-16064 + selector: + app: ssv-node-64 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-64 + name: ssv-node-64 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-64 + template: + metadata: + labels: + app: ssv-node-64 + spec: + containers: + - name: ssv-node-64 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12064 + name: port-12064 + protocol: UDP + hostPort: 12064 + - containerPort: 13064 + name: port-13064 + hostPort: 13064 + - containerPort: 15064 + name: port-15064 + hostPort: 15064 + - containerPort: 16064 + name: port-16064 + hostPort: 16064 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15064" + - name: SSV_API_PORT + value: "16064" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-64 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-64-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-64 + persistentVolumeClaim: + claimName: ssv-node-64 + - name: ssv-node-64-cm + configMap: + name: ssv-node-64-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-65-deployment.yml b/.k8/hetzner-stage/ssv-node-65-deployment.yml new file mode 100644 index 0000000000..5390b92509 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-65-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-65-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-65 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12065 + protocol: UDP + targetPort: 12065 + name: port-12065 + - port: 13065 + protocol: TCP + targetPort: 13065 + name: port-13065 + - port: 15065 + protocol: TCP + targetPort: 15065 + name: metrics + - port: 16065 + protocol: TCP + targetPort: 16065 + name: port-16065 + selector: + app: ssv-node-65 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-65 + name: ssv-node-65 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-65 + template: + metadata: + labels: + app: ssv-node-65 + spec: + containers: + - name: ssv-node-65 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12065 + name: port-12065 + protocol: UDP + hostPort: 12065 + - containerPort: 13065 + name: port-13065 + hostPort: 13065 + - containerPort: 15065 + name: port-15065 + hostPort: 15065 + - containerPort: 16065 + name: port-16065 + hostPort: 16065 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15065" + - name: SSV_API_PORT + value: "16065" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-65 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-65-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-65 + persistentVolumeClaim: + claimName: ssv-node-65 + - name: ssv-node-65-cm + configMap: + name: ssv-node-65-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-66-deployment.yml b/.k8/hetzner-stage/ssv-node-66-deployment.yml new file mode 100644 index 0000000000..d6e86a35bc --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-66-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-66-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-66 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12066 + protocol: UDP + targetPort: 12066 + name: port-12066 + - port: 13066 + protocol: TCP + targetPort: 13066 + name: port-13066 + - port: 15066 + protocol: TCP + targetPort: 15066 + name: metrics + - port: 16066 + protocol: TCP + targetPort: 16066 + name: port-16066 + selector: + app: ssv-node-66 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-66 + name: ssv-node-66 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-66 + template: + metadata: + labels: + app: ssv-node-66 + spec: + containers: + - name: ssv-node-66 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12066 + name: port-12066 + protocol: UDP + hostPort: 12066 + - containerPort: 13066 + name: port-13066 + hostPort: 13066 + - containerPort: 15066 + name: port-15066 + hostPort: 15066 + - containerPort: 16066 + name: port-16066 + hostPort: 16066 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15066" + - name: SSV_API_PORT + value: "16066" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-66 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-66-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-66 + persistentVolumeClaim: + claimName: ssv-node-66 + - name: ssv-node-66-cm + configMap: + name: ssv-node-66-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-67-deployment.yml b/.k8/hetzner-stage/ssv-node-67-deployment.yml new file mode 100644 index 0000000000..047e8c1b84 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-67-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-67-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-67 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12067 + protocol: UDP + targetPort: 12067 + name: port-12067 + - port: 13067 + protocol: TCP + targetPort: 13067 + name: port-13067 + - port: 15067 + protocol: TCP + targetPort: 15067 + name: metrics + - port: 16067 + protocol: TCP + targetPort: 16067 + name: port-16067 + selector: + app: ssv-node-67 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-67 + name: ssv-node-67 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-67 + template: + metadata: + labels: + app: ssv-node-67 + spec: + containers: + - name: ssv-node-67 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12067 + name: port-12067 + protocol: UDP + hostPort: 12067 + - containerPort: 13067 + name: port-13067 + hostPort: 13067 + - containerPort: 15067 + name: port-15067 + hostPort: 15067 + - containerPort: 16067 + name: port-16067 + hostPort: 16067 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15067" + - name: SSV_API_PORT + value: "16067" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-67 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-67-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-67 + persistentVolumeClaim: + claimName: ssv-node-67 + - name: ssv-node-67-cm + configMap: + name: ssv-node-67-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-68-deployment.yml b/.k8/hetzner-stage/ssv-node-68-deployment.yml new file mode 100644 index 0000000000..777a5ca07a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-68-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-68-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-68 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12068 + protocol: UDP + targetPort: 12068 + name: port-12068 + - port: 13068 + protocol: TCP + targetPort: 13068 + name: port-13068 + - port: 15068 + protocol: TCP + targetPort: 15068 + name: metrics + - port: 16068 + protocol: TCP + targetPort: 16068 + name: port-16068 + selector: + app: ssv-node-68 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-68 + name: ssv-node-68 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-68 + template: + metadata: + labels: + app: ssv-node-68 + spec: + containers: + - name: ssv-node-68 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12068 + name: port-12068 + protocol: UDP + hostPort: 12068 + - containerPort: 13068 + name: port-13068 + hostPort: 13068 + - containerPort: 15068 + name: port-15068 + hostPort: 15068 + - containerPort: 16068 + name: port-16068 + hostPort: 16068 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15068" + - name: SSV_API_PORT + value: "16068" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-68 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-68-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-68 + persistentVolumeClaim: + claimName: ssv-node-68 + - name: ssv-node-68-cm + configMap: + name: ssv-node-68-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-69-deployment.yml b/.k8/hetzner-stage/ssv-node-69-deployment.yml new file mode 100644 index 0000000000..9d7fd2bbaa --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-69-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-69-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-69 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12069 + protocol: UDP + targetPort: 12069 + name: port-12069 + - port: 13069 + protocol: TCP + targetPort: 13069 + name: port-13069 + - port: 15069 + protocol: TCP + targetPort: 15069 + name: metrics + - port: 16069 + protocol: TCP + targetPort: 16069 + name: port-16069 + selector: + app: ssv-node-69 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-69 + name: ssv-node-69 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-69 + template: + metadata: + labels: + app: ssv-node-69 + spec: + containers: + - name: ssv-node-69 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12069 + name: port-12069 + protocol: UDP + hostPort: 12069 + - containerPort: 13069 + name: port-13069 + hostPort: 13069 + - containerPort: 15069 + name: port-15069 + hostPort: 15069 + - containerPort: 16069 + name: port-16069 + hostPort: 16069 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15069" + - name: SSV_API_PORT + value: "16069" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-69 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-69-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-69 + persistentVolumeClaim: + claimName: ssv-node-69 + - name: ssv-node-69-cm + configMap: + name: ssv-node-69-cm + hostNetwork: true diff --git a/.k8/stage/ssv-exporter-2.yml b/.k8/hetzner-stage/ssv-node-7-deployment.yml similarity index 52% rename from .k8/stage/ssv-exporter-2.yml rename to .k8/hetzner-stage/ssv-node-7-deployment.yml index 7bcef4937a..358fa28811 100644 --- a/.k8/stage/ssv-exporter-2.yml 
+++ b/.k8/hetzner-stage/ssv-node-7-deployment.yml @@ -1,44 +1,12 @@ --- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: ssv-exporter-v2 - namespace: REPLACE_NAMESPACE -spec: - hosts: - - "ws-exporter-v2.REPLACE_DOMAIN_SUFFIX" - gateways: - - ssv-exporter-v2 - http: - - route: - - destination: - host: ssv-exporter-v2 - port: - number: 14007 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: ssv-exporter-v2 - namespace: REPLACE_NAMESPACE -spec: - selector: - istio: ingressgateway-int - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "ws-exporter-v2.REPLACE_DOMAIN_SUFFIX" ---- apiVersion: v1 kind: Service metadata: - name: ssv-exporter-v2 + name: ssv-node-7-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-exporter-v2 + app: ssv-node-7 + prometheus/app: ssv-node spec: type: ClusterIP ports: @@ -50,27 +18,23 @@ spec: protocol: TCP targetPort: 13007 name: port-13007 - - port: 14007 - protocol: TCP - targetPort: 14007 - name: port-14007 - port: 15007 protocol: TCP targetPort: 15007 - name: port-15007 + name: metrics - port: 16007 protocol: TCP targetPort: 16007 name: port-16007 selector: - app: ssv-exporter-v2 + app: ssv-node-7 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-exporter-v2 - name: ssv-exporter-v2 + app: ssv-node-7 + name: ssv-node-7 namespace: REPLACE_NAMESPACE spec: replicas: 1 @@ -78,41 +42,30 @@ spec: type: Recreate selector: matchLabels: - app: ssv-exporter-v2 + app: ssv-node-7 template: metadata: labels: - app: ssv-exporter-v2 + app: ssv-node-7 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-exporter-v2 + - name: ssv-node-7 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: - cpu: 
REPLACE_EXPORTER_CPU_LIMIT - memory: REPLACE_EXPORTER_MEM_LIMIT + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - containerPort: 12007 name: port-12007 - hostPort: 12007 protocol: UDP + hostPort: 12007 - containerPort: 13007 name: port-13007 hostPort: 13007 - - containerPort: 14007 - name: port-14007 - hostPort: 14007 - containerPort: 15007 name: port-15007 hostPort: 15007 @@ -136,13 +89,19 @@ spec: - name: LOG_LEVEL value: "debug" - name: DEBUG_SERVICES - value: "ssv.*" + value: "ssv/*." - name: DISCOVERY_TYPE_KEY value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -151,37 +110,25 @@ spec: value: "16007" - name: ENABLE_PROFILE value: "true" - - name: UDP_PORT - value: "12007" - - name: TCP_PORT - value: "13007" - - name: WS_API_PORT - value: "14007" - - name: FULLNODE - value: "true" - - name: EXPORTER - value: "true" - name: DISCOVERY_TRACE - value: "false" + value: 'false' - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS value: "false" - - name: SUBNETS - value: "0xffffffffffffffffffffffffffffffff" volumeMounts: - mountPath: /data - name: ssv-exporter-v2 + name: ssv-node-7 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-exporter-v2-cm + name: ssv-node-7-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-exporter-v2 + - name: ssv-node-7 persistentVolumeClaim: - claimName: ssv-exporter-v2 - - name: ssv-exporter-v2-cm + claimName: ssv-node-7 + - name: ssv-node-7-cm configMap: - name: ssv-exporter-v2-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-7-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-70-deployment.yml 
b/.k8/hetzner-stage/ssv-node-70-deployment.yml new file mode 100644 index 0000000000..5649051e6c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-70-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-70-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-70 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12070 + protocol: UDP + targetPort: 12070 + name: port-12070 + - port: 13070 + protocol: TCP + targetPort: 13070 + name: port-13070 + - port: 15070 + protocol: TCP + targetPort: 15070 + name: metrics + - port: 16070 + protocol: TCP + targetPort: 16070 + name: port-16070 + selector: + app: ssv-node-70 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-70 + name: ssv-node-70 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-70 + template: + metadata: + labels: + app: ssv-node-70 + spec: + containers: + - name: ssv-node-70 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12070 + name: port-12070 + protocol: UDP + hostPort: 12070 + - containerPort: 13070 + name: port-13070 + hostPort: 13070 + - containerPort: 15070 + name: port-15070 + hostPort: 15070 + - containerPort: 16070 + name: port-16070 + hostPort: 16070 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15070" + - name: SSV_API_PORT + value: "16070" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-70 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-70-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-70 + persistentVolumeClaim: + claimName: ssv-node-70 + - name: ssv-node-70-cm + configMap: + name: ssv-node-70-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-71-deployment.yml b/.k8/hetzner-stage/ssv-node-71-deployment.yml new file mode 100644 index 0000000000..ea98ca411c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-71-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-71-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-71 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12071 + protocol: UDP + targetPort: 12071 + name: port-12071 + - port: 13071 + protocol: TCP + targetPort: 13071 + name: port-13071 + - port: 15071 + protocol: TCP + targetPort: 15071 + name: metrics + - port: 16071 + protocol: TCP + targetPort: 16071 + name: port-16071 + selector: + app: ssv-node-71 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-71 + name: ssv-node-71 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-71 + template: + metadata: + labels: + app: ssv-node-71 + spec: + containers: + - name: ssv-node-71 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12071 + name: port-12071 + protocol: UDP + hostPort: 12071 + - containerPort: 13071 + name: port-13071 + hostPort: 13071 + - containerPort: 15071 + name: port-15071 + hostPort: 15071 + - containerPort: 16071 + name: port-16071 + hostPort: 16071 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15071" + - name: SSV_API_PORT + value: "16071" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-71 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-71-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-71 + persistentVolumeClaim: + claimName: ssv-node-71 + - name: ssv-node-71-cm + configMap: + name: ssv-node-71-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-72-deployment.yml b/.k8/hetzner-stage/ssv-node-72-deployment.yml new file mode 100644 index 0000000000..ba1a8d2f07 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-72-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service 
+metadata: + name: ssv-node-72-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-72 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12072 + protocol: UDP + targetPort: 12072 + name: port-12072 + - port: 13072 + protocol: TCP + targetPort: 13072 + name: port-13072 + - port: 15072 + protocol: TCP + targetPort: 15072 + name: metrics + - port: 16072 + protocol: TCP + targetPort: 16072 + name: port-16072 + selector: + app: ssv-node-72 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-72 + name: ssv-node-72 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-72 + template: + metadata: + labels: + app: ssv-node-72 + spec: + containers: + - name: ssv-node-72 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12072 + name: port-12072 + protocol: UDP + hostPort: 12072 + - containerPort: 13072 + name: port-13072 + hostPort: 13072 + - containerPort: 15072 + name: port-15072 + hostPort: 15072 + - containerPort: 16072 + name: port-16072 + hostPort: 16072 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15072" + - name: SSV_API_PORT + value: "16072" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-72 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-72-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-72 + persistentVolumeClaim: + claimName: ssv-node-72 + - name: ssv-node-72-cm + configMap: + name: ssv-node-72-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-8-deployment.yml b/.k8/hetzner-stage/ssv-node-8-deployment.yml new file mode 100644 index 0000000000..a19ef9795b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-8-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-8-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-8 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12008 + protocol: UDP + targetPort: 12008 + name: port-12008 + - port: 13008 + protocol: TCP + targetPort: 13008 + name: port-13008 + - port: 15008 + protocol: TCP + targetPort: 15008 + name: metrics + - port: 16008 + protocol: TCP + targetPort: 16008 + name: port-16008 + selector: + app: ssv-node-8 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-8 + name: ssv-node-8 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-8 + template: + metadata: + labels: + app: ssv-node-8 + spec: + containers: + - name: ssv-node-8 + image: 
REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12008 + name: port-12008 + protocol: UDP + hostPort: 12008 + - containerPort: 13008 + name: port-13008 + hostPort: 13008 + - containerPort: 15008 + name: port-15008 + hostPort: 15008 + - containerPort: 16008 + name: port-16008 + hostPort: 16008 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15008" + - name: SSV_API_PORT + value: "16008" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-8 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-8-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-8 + persistentVolumeClaim: + claimName: ssv-node-8 + - name: ssv-node-8-cm + configMap: + name: ssv-node-8-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-9-deployment.yml b/.k8/hetzner-stage/ssv-node-9-deployment.yml new file mode 100644 index 0000000000..c2868e46c5 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-9-deployment.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: Service +metadata: 
+ name: ssv-node-9-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-9 + prometheus/app: ssv-node +spec: + type: ClusterIP + ports: + - port: 12009 + protocol: UDP + targetPort: 12009 + name: port-12009 + - port: 13009 + protocol: TCP + targetPort: 13009 + name: port-13009 + - port: 15009 + protocol: TCP + targetPort: 15009 + name: metrics + - port: 16009 + protocol: TCP + targetPort: 16009 + name: port-16009 + selector: + app: ssv-node-9 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-9 + name: ssv-node-9 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-9 + template: + metadata: + labels: + app: ssv-node-9 + spec: + containers: + - name: ssv-node-9 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12009 + name: port-12009 + protocol: UDP + hostPort: 12009 + - containerPort: 13009 + name: port-13009 + hostPort: 13009 + - containerPort: 15009 + name: port-15009 + hostPort: 15009 + - containerPort: 16009 + name: port-16009 + hostPort: 16009 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15009" + - name: SSV_API_PORT + value: "16009" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "false" + volumeMounts: + - mountPath: /data + name: ssv-node-9 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-9-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-9 + persistentVolumeClaim: + claimName: ssv-node-9 + - name: ssv-node-9-cm + configMap: + name: ssv-node-9-cm + hostNetwork: true diff --git a/.k8/stage/boot-node-2-deployment.yml b/.k8/production/holesky/boot-node-holesky-deployment.yml similarity index 57% rename from .k8/stage/boot-node-2-deployment.yml rename to .k8/production/holesky/boot-node-holesky-deployment.yml index 48bb6d8e1e..daa89e7c9b 100644 --- a/.k8/stage/boot-node-2-deployment.yml +++ b/.k8/production/holesky/boot-node-holesky-deployment.yml @@ -2,24 +2,24 @@ apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: - name: boot-node-2 + name: boot-node-holesky namespace: REPLACE_NAMESPACE spec: hosts: - - "ssv.REPLACE_DOMAIN_SUFFIX" + - "boot-node-holesky.REPLACE_DOMAIN_SUFFIX" gateways: - - boot-node-2 + - boot-node-holesky http: - route: - destination: - host: boot-node-2-svc + host: boot-node-holesky-svc port: - number: 5001 + number: 5003 --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: - name: boot-node-2 + name: boot-node-holesky namespace: REPLACE_NAMESPACE spec: selector: @@ -30,39 +30,35 @@ spec: name: http protocol: HTTP hosts: - - "ssv.REPLACE_DOMAIN_SUFFIX" + - "boot-node-holesky.REPLACE_DOMAIN_SUFFIX" --- 
apiVersion: v1 kind: Service metadata: - name: boot-node-2-svc + name: boot-node-holesky-svc namespace: REPLACE_NAMESPACE labels: - app: boot-node-2 + app: boot-node-holesky spec: type: ClusterIP ports: - - port: 5679 - protocol: TCP - targetPort: 5679 - name: port-5679 - - port: 4001 + - port: 4003 protocol: UDP - targetPort: 4001 - name: port-4001 - - port: 5001 + targetPort: 4003 + name: port-4003 + - port: 5003 protocol: TCP - targetPort: 5001 - name: port-5001 + targetPort: 5003 + name: port-5003 selector: - app: boot-node-2 + app: boot-node-holesky --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: boot-node-2 - name: boot-node-2 + app: boot-node-holesky + name: boot-node-holesky namespace: REPLACE_NAMESPACE spec: replicas: REPLACE_REPLICAS @@ -70,11 +66,11 @@ spec: type: Recreate selector: matchLabels: - app: boot-node-2 + app: boot-node-holesky template: metadata: labels: - app: boot-node-2 + app: boot-node-holesky spec: affinity: nodeAffinity: @@ -86,29 +82,44 @@ spec: values: - ssv-boot-node containers: - - name: boot-node-2 + - name: boot-node-holesky image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG imagePullPolicy: Always command: ["make", "start-boot-node"] ports: - - containerPort: 5001 - name: port-5001 - hostPort: 5001 + - containerPort: 5003 + name: port-5003 + hostPort: 5003 env: + - name: CONFIG_PATH + value: /config/config.example.yaml - name: BOOT_NODE_PRIVATE_KEY valueFrom: secretKeyRef: name: config-secrets - key: boot_node_2_private_key + key: boot_node_holesky_private_key - name: BOOT_NODE_EXTERNAL_IP valueFrom: secretKeyRef: name: config-secrets - key: boot_node_external_ip + key: boot_node_holesky_external_ip - name: TCP_PORT - value: "5001" + value: "5003" - name: UDP_PORT - value: "4001" + value: "4003" + volumeMounts: + - mountPath: /data/bootnode + name: boot-node-holesky + - mountPath: /data/config.yaml + name: boot-node-holesky-cm + volumes: + - name: boot-node-holesky + persistentVolumeClaim: + claimName: 
boot-node-holesky + - configMap: + defaultMode: 420 + name: boot-node-holesky-cm + name: boot-node-holesky-cm tolerations: - effect: NoSchedule key: kubernetes.io/role diff --git a/.k8/stage/scripts/deploy-boot-nodes.sh b/.k8/production/holesky/scripts/deploy-boot-nodes.sh similarity index 87% rename from .k8/stage/scripts/deploy-boot-nodes.sh rename to .k8/production/holesky/scripts/deploy-boot-nodes.sh index 738727c99d..bdf1f9dcff 100755 --- a/.k8/stage/scripts/deploy-boot-nodes.sh +++ b/.k8/production/holesky/scripts/deploy-boot-nodes.sh @@ -103,10 +103,9 @@ fi #done #fi -DIR=".k8/stage" +DIR=".k8/production/holesky" DEPLOY_FILES=( - "boot-node-deployment.yml" - "boot-node-2-deployment.yml" + "boot-node-holesky-deployment.yml" ) if [[ -d $DIR ]]; then @@ -119,10 +118,11 @@ if [[ -d $DIR ]]; then -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ - -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 done fi #deploy -kubectl --context=$K8S_CONTEXT apply -f .k8/stage/boot-node-deployment.yml || exit 1 -kubectl --context=$K8S_CONTEXT apply -f .k8/stage/boot-node-2-deployment.yml || exit 1 \ No newline at end of file +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/production/holesky/scripts/deploy-cluster-1--4.sh b/.k8/production/holesky/scripts/deploy-cluster-1--4.sh new file mode 100755 index 0000000000..cae1c6ca67 --- /dev/null +++ b/.k8/production/holesky/scripts/deploy-cluster-1--4.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of 
replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Please provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if !
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/production/holesky" +DEPLOY_FILES=( + "ssv-node-holesky-1-deployment.yml" + "ssv-node-holesky-2-deployment.yml" + "ssv-node-holesky-3-deployment.yml" + "ssv-node-holesky-4-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/stage/scripts/deploy-exporters.sh b/.k8/production/holesky/scripts/deploy-exporters.sh similarity index 81% rename from .k8/stage/scripts/deploy-exporters.sh rename to .k8/production/holesky/scripts/deploy-exporters.sh index 051e527cfa..794ab468b3 100755 --- a/.k8/stage/scripts/deploy-exporters.sh +++ b/.k8/production/holesky/scripts/deploy-exporters.sh @@ -33,7 +33,7 @@ if [[ -z $6 ]]; then fi if [[ -z $7 ]]; then - echo "Please provide domain suffix" + echo "Please provide domain suffix" exit 1 fi @@ -80,10 +80,23 @@ if !
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE fi -DIR=".k8/stage" +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/production/holesky" DEPLOY_FILES=( - "ssv-exporter-1.yml" - "ssv-exporter-2.yml" + "ssv-full-node-holesky-deployment.yml" ) if [[ -d $DIR ]]; then diff --git a/.k8/production/holesky/ssv-full-node-holesky-deployment.yml b/.k8/production/holesky/ssv-full-node-holesky-deployment.yml new file mode 100644 index 0000000000..dd61084842 --- /dev/null +++ b/.k8/production/holesky/ssv-full-node-holesky-deployment.yml @@ -0,0 +1,182 @@ +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: ssv-full-node-holesky-1 + namespace: REPLACE_NAMESPACE +spec: + hosts: + - "ws-ssv-full-node-holesky-1.REPLACE_DOMAIN_SUFFIX" + gateways: + - ssv-full-node-holesky-1 + http: + - route: + - destination: + host: ssv-full-node-holesky-1 + port: + number: 14021 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: ssv-full-node-holesky-1 + namespace: REPLACE_NAMESPACE +spec: + selector: + istio: ingressgateway-int + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "ws-ssv-full-node-holesky-1.REPLACE_DOMAIN_SUFFIX" +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-full-node-holesky-1 + namespace: REPLACE_NAMESPACE + labels: + app: ssv-full-node-holesky-1 +spec: + type: ClusterIP + ports: + - port: 12021 + protocol: UDP + targetPort: 12021 + name: port-12021 + - port: 13021 + protocol: TCP + targetPort: 13021 + name: port-13021 + - port: 14021 + protocol: TCP + targetPort: 14021 + name: port-14021 + - port: 
15021 + protocol: TCP + targetPort: 15021 + name: port-15021 + - port: 16021 + protocol: TCP + targetPort: 16021 + name: port-16021 + selector: + app: ssv-full-node-holesky-1 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-full-node-holesky-1 + name: ssv-full-node-holesky-1 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-full-node-holesky-1 + template: + metadata: + labels: + app: ssv-full-node-holesky-1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-full-node-holesky-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_EXPORTER_CPU_LIMIT + memory: REPLACE_EXPORTER_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12021 + name: port-12021 + hostPort: 12021 + protocol: UDP + - containerPort: 13021 + name: port-13021 + hostPort: 13021 + - containerPort: 14021 + name: port-14021 + hostPort: 14021 + - containerPort: 15021 + name: port-15021 + hostPort: 15021 + - containerPort: 16021 + name: port-16021 + hostPort: 16021 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: METRICS_API_PORT + value: "15021" + - name: SSV_API_PORT + value: "16021" + - name: ENABLE_PROFILE + value: "true" + - name: UDP_PORT + value: "12021" + - name: TCP_PORT + value: "13021" + - name: WS_API_PORT + value: "14021" + - name: FULLNODE + value: "true" + - name: EXPORTER + value: 
"true" + - name: MSG_WORKERS_COUNT + value: "1024" + - name: MSG_WORKER_BUFFER_SIZE + value: "2048" + - name: SUBNETS + value: "0xffffffffffffffffffffffffffffffff" + - name: P2P_MAX_PEERS + value: "300" + volumeMounts: + - mountPath: /data + name: ssv-full-node-holesky-1 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-full-node-holesky-1-cm + volumes: + - name: ssv-full-node-holesky-1 + persistentVolumeClaim: + claimName: ssv-full-node-holesky-1 + - name: ssv-full-node-holesky-1-cm + configMap: + name: ssv-full-node-holesky-1-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/ssv-node-holesky-1-deployment.yml b/.k8/production/holesky/ssv-node-holesky-1-deployment.yml new file mode 100644 index 0000000000..bcf728d701 --- /dev/null +++ b/.k8/production/holesky/ssv-node-holesky-1-deployment.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-holesky-1-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-holesky-1 +spec: + type: ClusterIP + ports: + - port: 12022 + protocol: UDP + targetPort: 12022 + name: port-12022 + - port: 13022 + protocol: TCP + targetPort: 13022 + name: port-13022 + - port: 15022 + protocol: TCP + targetPort: 15022 + name: port-15022 + - port: 16022 + protocol: TCP + targetPort: 16022 + name: port-16022 + selector: + app: ssv-node-holesky-1 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-holesky-1 + name: ssv-node-holesky-1 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-holesky-1 + template: + metadata: + labels: + app: ssv-node-holesky-1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: 
beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-node-holesky-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12022 + name: port-12022 + hostPort: 12022 + protocol: UDP + - containerPort: 13022 + name: port-13022 + hostPort: 13022 + - containerPort: 15022 + name: port-15022 + hostPort: 15022 + - containerPort: 16022 + name: port-16022 + hostPort: 16022 + env: + - name: SHARE_CONFIG + value: "./data1/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: METRICS_API_PORT + value: "15022" + - name: SSV_API_PORT + value: "16022" + - name: ENABLE_PROFILE + value: "true" + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-holesky-1 + - mountPath: /data1/share.yaml + subPath: share.yaml + name: ssv-node-holesky-1-cm + volumes: + - name: ssv-node-holesky-1 + persistentVolumeClaim: + claimName: ssv-node-holesky-1 + - name: ssv-node-holesky-1-cm + configMap: + name: ssv-node-holesky-1-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/ssv-node-holesky-2-deployment.yml b/.k8/production/holesky/ssv-node-holesky-2-deployment.yml new file mode 100644 index 0000000000..824db2efed --- /dev/null +++ b/.k8/production/holesky/ssv-node-holesky-2-deployment.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-holesky-2-svc + namespace: REPLACE_NAMESPACE + labels: + app: 
ssv-node-holesky-2 +spec: + type: ClusterIP + ports: + - port: 12023 + protocol: UDP + targetPort: 12023 + name: port-12023 + - port: 13023 + protocol: TCP + targetPort: 13023 + name: port-13023 + - port: 15023 + protocol: TCP + targetPort: 15023 + name: port-15023 + - port: 16023 + protocol: TCP + targetPort: 16023 + name: port-16023 + selector: + app: ssv-node-holesky-2 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-holesky-2 + name: ssv-node-holesky-2 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-holesky-2 + template: + metadata: + labels: + app: ssv-node-holesky-2 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-node-holesky-2 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12023 + name: port-12023 + hostPort: 12023 + protocol: UDP + - containerPort: 13023 + name: port-13023 + hostPort: 13023 + - containerPort: 15023 + name: port-15023 + hostPort: 15023 + - containerPort: 16023 + name: port-16023 + hostPort: 16023 + env: + - name: SHARE_CONFIG + value: "./data1/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: METRICS_API_PORT + value: "15023" + - name: SSV_API_PORT + value:
"16023" + - name: ENABLE_PROFILE + value: "true" + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-holesky-2 + - mountPath: /data1/share.yaml + subPath: share.yaml + name: ssv-node-holesky-2-cm + volumes: + - name: ssv-node-holesky-2 + persistentVolumeClaim: + claimName: ssv-node-holesky-2 + - name: ssv-node-holesky-2-cm + configMap: + name: ssv-node-holesky-2-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/ssv-node-holesky-3-deployment.yml b/.k8/production/holesky/ssv-node-holesky-3-deployment.yml new file mode 100644 index 0000000000..0104fc6ee3 --- /dev/null +++ b/.k8/production/holesky/ssv-node-holesky-3-deployment.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-holesky-3-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-holesky-3 +spec: + type: ClusterIP + ports: + - port: 12024 + protocol: UDP + targetPort: 12024 + name: port-12024 + - port: 13024 + protocol: TCP + targetPort: 13024 + name: port-13024 + - port: 15024 + protocol: TCP + targetPort: 15024 + name: port-15024 + - port: 16024 + protocol: TCP + targetPort: 16024 + name: port-16024 + selector: + app: ssv-node-holesky-3 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-holesky-3 + name: ssv-node-holesky-3 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-holesky-3 + template: + metadata: + labels: + app: ssv-node-holesky-3 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-node-holesky-3 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + 
imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12024 + name: port-12024 + hostPort: 12024 + protocol: UDP + - containerPort: 13024 + name: port-13024 + hostPort: 13024 + - containerPort: 15024 + name: port-15024 + hostPort: 15024 + - containerPort: 16024 + name: port-16024 + hostPort: 16024 + env: + - name: SHARE_CONFIG + value: "./data1/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: METRICS_API_PORT + value: "15024" + - name: SSV_API_PORT + value: "16024" + - name: ENABLE_PROFILE + value: "true" + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-holesky-3 + - mountPath: /data1/share.yaml + subPath: share.yaml + name: ssv-node-holesky-3-cm + volumes: + - name: ssv-node-holesky-3 + persistentVolumeClaim: + claimName: ssv-node-holesky-3 + - name: ssv-node-holesky-3-cm + configMap: + name: ssv-node-holesky-3-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/holesky/ssv-node-holesky-4-deployment.yml b/.k8/production/holesky/ssv-node-holesky-4-deployment.yml new file mode 100644 index 0000000000..1b454b7814 --- /dev/null +++ b/.k8/production/holesky/ssv-node-holesky-4-deployment.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-holesky-4-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-holesky-4 +spec: + type: ClusterIP + ports: + - port: 12025 + protocol: UDP + targetPort: 12025 + name: port-12025 + - port: 13025 + protocol: TCP + targetPort: 
13025 + name: port-13025 + - port: 15025 + protocol: TCP + targetPort: 15025 + name: port-15025 + - port: 16025 + protocol: TCP + targetPort: 16025 + name: port-16025 + selector: + app: ssv-node-holesky-4 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-holesky-4 + name: ssv-node-holesky-4 + namespace: REPLACE_NAMESPACE +spec: + replicas: REPLACE_REPLICAS + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-holesky-4 + template: + metadata: + labels: + app: ssv-node-holesky-4 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/role + operator: In + values: + - ssv-main + - key: beta.kubernetes.io/instance-type + operator: In + values: + - m5a.4xlarge + containers: + - name: ssv-node-holesky-4 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12025 + name: port-12025 + hostPort: 12025 + protocol: UDP + - containerPort: 13025 + name: port-13025 + hostPort: 13025 + - containerPort: 15025 + name: port-15025 + hostPort: 15025 + - containerPort: 16025 + name: port-16025 + hostPort: 16025 + env: + - name: SHARE_CONFIG + value: "./data1/share.yaml" + - name: LOG_LEVEL + value: "debug" + - name: DB_REPORTING + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: DB_PATH + value: ./data/db-holesky + - name: NETWORK + value: holesky + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: METRICS_API_PORT + value: "15025" + - name: SSV_API_PORT + value: "16025" + - name: ENABLE_PROFILE + value: "true" + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-holesky-4 + - mountPath: 
/data1/share.yaml + subPath: share.yaml + name: ssv-node-holesky-4-cm + volumes: + - name: ssv-node-holesky-4 + persistentVolumeClaim: + claimName: ssv-node-holesky-4 + - name: ssv-node-holesky-4-cm + configMap: + name: ssv-node-holesky-4-cm + tolerations: + - effect: NoSchedule + key: kubernetes.io/role + operator: Exists + hostNetwork: true diff --git a/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml index 7f2616196a..49f3ad5f29 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16017" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-1 diff --git a/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml index 2484e7c214..8cfa6f3d6b 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16018" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-2 diff --git a/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml index 2b0b836915..1b197f8f51 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16019" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-3 diff --git a/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml index 048e021889..5e83a864eb 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml +++ 
b/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16020" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-4 diff --git a/.k8/stage/boot-node-deployment.yml b/.k8/stage/boot-node-deployment.yml deleted file mode 100644 index 7b974cbc7d..0000000000 --- a/.k8/stage/boot-node-deployment.yml +++ /dev/null @@ -1,112 +0,0 @@ ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: boot-node - namespace: REPLACE_NAMESPACE -spec: - hosts: - - "ssv.REPLACE_DOMAIN_SUFFIX" - gateways: - - boot-node - http: - - route: - - destination: - host: boot-node-svc - port: - number: 5000 ---- -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: boot-node - namespace: REPLACE_NAMESPACE -spec: - selector: - istio: ingressgateway - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "ssv.REPLACE_DOMAIN_SUFFIX" ---- -apiVersion: v1 -kind: Service -metadata: - name: boot-node-svc - namespace: REPLACE_NAMESPACE - labels: - app: boot-node -spec: - type: ClusterIP - ports: - - port: 5678 - protocol: TCP - targetPort: 5678 - name: port-5678 - - port: 4000 - protocol: UDP - targetPort: 4000 - name: port-4000 - - port: 5000 - protocol: TCP - targetPort: 5000 - name: port-5000 - selector: - app: boot-node ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: boot-node - name: boot-node - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: boot-node - template: - metadata: - labels: - app: boot-node - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-boot-node - containers: - - name: boot-node - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - 
imagePullPolicy: Always - command: ["make", "start-boot-node"] - ports: - - containerPort: 5000 - name: port-5000 - hostPort: 5000 - env: - - name: BOOT_NODE_PRIVATE_KEY - valueFrom: - secretKeyRef: - name: config-secrets - key: boot_node_private_key - - name: BOOT_NODE_EXTERNAL_IP - valueFrom: - secretKeyRef: - name: config-secrets - key: boot_node_external_ip - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/lb-boot-node.yml b/.k8/stage/lb-boot-node.yml deleted file mode 100644 index 07f84a1d0b..0000000000 --- a/.k8/stage/lb-boot-node.yml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: boot-node-lb-svc - namespace: ssv - annotations: - "external-dns.alpha.kubernetes.io/hostname": ssv-lb.stage.bloxinfra.com - "service.beta.kubernetes.io/aws-load-balancer-internal": "0.0.0.0/0" - labels: - app: boot-node -spec: - type: LoadBalancer - ports: - - port: 5000 - protocol: TCP - targetPort: 5000 - name: port-5000 - selector: - app: boot-node diff --git a/.k8/stage/ssv-node-10-deployment.yml b/.k8/stage/ssv-node-10-deployment.yml deleted file mode 100644 index ce73488cf3..0000000000 --- a/.k8/stage/ssv-node-10-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-10-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-10 -spec: - type: ClusterIP - ports: - - port: 12010 - protocol: UDP - targetPort: 12010 - name: port-12010 - - port: 13010 - protocol: TCP - targetPort: 13010 - name: port-13010 - - port: 15010 - protocol: TCP - targetPort: 15010 - name: port-15010 - - port: 16010 - protocol: TCP - targetPort: 16010 - name: port-16010 - selector: - app: ssv-node-10 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-10 - name: ssv-node-10 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - 
app: ssv-node-10 - template: - metadata: - labels: - app: ssv-node-10 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-10 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12010 - name: port-12010 - protocol: UDP - hostPort: 12010 - - containerPort: 13010 - name: port-13010 - hostPort: 13010 - - containerPort: 15010 - name: port-15010 - hostPort: 15010 - - containerPort: 16010 - name: port-16010 - hostPort: 16010 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15010" - - name: SSV_API_PORT - value: "16010" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-10 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-10 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-10 - persistentVolumeClaim: - claimName: ssv-node-10 - - name: ssv-cm-validator-options-10 - configMap: - name: ssv-cm-validator-options-10 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-11-deployment.yml b/.k8/stage/ssv-node-11-deployment.yml deleted file mode 100644 index 2bddd3cdeb..0000000000 --- a/.k8/stage/ssv-node-11-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-11-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-11 -spec: - type: ClusterIP - ports: - - port: 12011 - protocol: UDP - targetPort: 12011 - name: port-12011 - - port: 13011 - protocol: TCP - targetPort: 13011 - name: port-13011 - - port: 15011 - protocol: TCP - targetPort: 15011 - name: 
port-15011 - - port: 16011 - protocol: TCP - targetPort: 16011 - name: port-16011 - selector: - app: ssv-node-11 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-11 - name: ssv-node-11 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-11 - template: - metadata: - labels: - app: ssv-node-11 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-11 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12011 - name: port-12011 - protocol: UDP - hostPort: 12011 - - containerPort: 13011 - name: port-13011 - hostPort: 13011 - - containerPort: 15011 - name: port-15011 - hostPort: 15011 - - containerPort: 16011 - name: port-16011 - hostPort: 16011 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15011" - - name: SSV_API_PORT - value: "16011" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-11 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-11 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-11 - persistentVolumeClaim: - claimName: ssv-node-11 - - name: ssv-cm-validator-options-11 - configMap: - name: ssv-cm-validator-options-11 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-12-deployment.yml b/.k8/stage/ssv-node-12-deployment.yml deleted file mode 100644 index f06afa878f..0000000000 --- a/.k8/stage/ssv-node-12-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-12-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-12 -spec: - type: ClusterIP - ports: - - port: 12012 - protocol: UDP - targetPort: 12012 - name: port-12012 - - port: 13012 - protocol: TCP - targetPort: 13012 - name: port-13012 - - port: 15012 - protocol: TCP - targetPort: 15012 - name: 
port-15012 - - port: 16012 - protocol: TCP - targetPort: 16012 - name: port-16012 - selector: - app: ssv-node-12 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-12 - name: ssv-node-12 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-12 - template: - metadata: - labels: - app: ssv-node-12 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-12 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12012 - name: port-12012 - protocol: UDP - hostPort: 12012 - - containerPort: 13012 - name: port-13012 - hostPort: 13012 - - containerPort: 15012 - name: port-15012 - hostPort: 15012 - - containerPort: 16012 - name: port-16012 - hostPort: 16012 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15012" - - name: SSV_API_PORT - value: "16012" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-12 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-12 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-12 - persistentVolumeClaim: - claimName: ssv-node-12 - - name: ssv-cm-validator-options-12 - configMap: - name: ssv-cm-validator-options-12 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-9-deployment.yml b/.k8/stage/ssv-node-9-deployment.yml deleted file mode 100644 index 37098e1a0a..0000000000 --- a/.k8/stage/ssv-node-9-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-9-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-9 -spec: - type: ClusterIP - ports: - - port: 12009 - protocol: UDP - targetPort: 12009 - name: port-12009 - - port: 13009 - protocol: TCP - targetPort: 13009 - name: port-13009 - - port: 15009 - protocol: TCP - targetPort: 15009 - name: port-15009 - - 
port: 16009 - protocol: TCP - targetPort: 16009 - name: port-16009 - selector: - app: ssv-node-9 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-9 - name: ssv-node-9 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-9 - template: - metadata: - labels: - app: ssv-node-9 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-9 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12009 - name: port-12009 - protocol: UDP - hostPort: 12009 - - containerPort: 13009 - name: port-13009 - hostPort: 13009 - - containerPort: 15009 - name: port-15009 - hostPort: 15009 - - containerPort: 16009 - name: port-16009 - hostPort: 16009 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15009" - - name: SSV_API_PORT - value: "16009" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-9 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-9 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-9 - persistentVolumeClaim: - claimName: ssv-node-9 - - name: ssv-cm-validator-options-9 - configMap: - name: ssv-cm-validator-options-9 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-1-deployment.yml b/.k8/stage/ssv-node-v2-1-deployment.yml deleted file mode 100644 index 340d2a3419..0000000000 --- a/.k8/stage/ssv-node-v2-1-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-1-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-1 -spec: - type: ClusterIP - ports: - - port: 12001 - protocol: UDP - targetPort: 12001 - name: port-12001 - - port: 13001 - protocol: TCP - targetPort: 13001 - name: port-13001 - - port: 15001 - protocol: TCP - targetPort: 15001 - name: 
port-15001 - - port: 16001 - protocol: TCP - targetPort: 16001 - name: port-16001 - selector: - app: ssv-node-v2-1 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-1 - name: ssv-node-v2-1 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-1 - template: - metadata: - labels: - app: ssv-node-v2-1 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-1 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12001 - name: port-12001 - hostPort: 12001 - protocol: UDP - - containerPort: 13001 - name: port-13001 - hostPort: 13001 - - containerPort: 15001 - name: port-15001 - hostPort: 15001 - - containerPort: 16001 - name: port-16001 - hostPort: 16001 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15001" - - name: SSV_API_PORT - value: "16001" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-1 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-1 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-1 - persistentVolumeClaim: - claimName: ssv-node-v2-1 - - name: ssv-cm-validator-options-1 - configMap: - name: ssv-cm-validator-options-1 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-2-deployment.yml b/.k8/stage/ssv-node-v2-2-deployment.yml deleted file mode 100644 index ccb63c8cde..0000000000 --- a/.k8/stage/ssv-node-v2-2-deployment.yml +++ /dev/null @@ -1,165 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-2-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-2 -spec: - type: ClusterIP - ports: - - port: 12002 - protocol: UDP - targetPort: 12002 - name: port-12002 - - port: 13002 - protocol: TCP - targetPort: 13002 - name: port-13002 - - port: 15002 - protocol: TCP - targetPort: 15002 - 
name: port-15002 - - port: 16002 - protocol: TCP - targetPort: 16002 - name: port-16002 - selector: - app: ssv-node-v2-2 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-2 - name: ssv-node-v2-2 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-2 - template: - metadata: - labels: - app: ssv-node-v2-2 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-2 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12002 - name: port-12002 - protocol: UDP - hostPort: 12002 - - containerPort: 13002 - name: port-13002 - hostPort: 13002 - - containerPort: 15002 - name: port-15002 - hostPort: 15002 - - containerPort: 16002 - name: port-16002 - hostPort: 16002 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: TCP_PORT - value: "13002" - - name: UDP_PORT - value: "12002" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15002" - - name: SSV_API_PORT - value: "16002" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-2 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-2 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-2 - persistentVolumeClaim: - claimName: ssv-node-v2-2 - - name: ssv-cm-validator-options-2 - configMap: - name: ssv-cm-validator-options-2 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-3-deployment.yml b/.k8/stage/ssv-node-v2-3-deployment.yml deleted file mode 100644 index d30d7648a5..0000000000 --- a/.k8/stage/ssv-node-v2-3-deployment.yml +++ /dev/null @@ -1,165 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-3-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-3 -spec: - type: ClusterIP - ports: - - port: 12003 - protocol: UDP - targetPort: 12003 - name: port-12003 - - port: 13003 - protocol: TCP - targetPort: 13003 - 
name: port-13003 - - port: 15003 - protocol: TCP - targetPort: 15003 - name: port-15003 - - port: 16003 - protocol: TCP - targetPort: 16003 - name: port-16003 - selector: - app: ssv-node-v2-3 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-3 - name: ssv-node-v2-3 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-3 - template: - metadata: - labels: - app: ssv-node-v2-3 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-3 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12003 - name: port-12003 - protocol: UDP - hostPort: 12003 - - containerPort: 13003 - name: port-13003 - hostPort: 13003 - - containerPort: 15003 - name: port-15003 - hostPort: 15003 - - containerPort: 16003 - name: port-16003 - hostPort: 16003 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: TCP_PORT - value: "13003" - - name: UDP_PORT - value: "12003" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15003" - - name: SSV_API_PORT - value: "16003" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-3 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-3 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-3 - persistentVolumeClaim: - claimName: ssv-node-v2-3 - - name: ssv-cm-validator-options-3 - configMap: - name: ssv-cm-validator-options-3 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-4-deployment.yml b/.k8/stage/ssv-node-v2-4-deployment.yml deleted file mode 100644 index de012b24f7..0000000000 --- a/.k8/stage/ssv-node-v2-4-deployment.yml +++ /dev/null @@ -1,165 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-4-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-4 -spec: - type: ClusterIP - ports: - - port: 12004 - protocol: UDP - targetPort: 12004 - name: port-12004 - - port: 13004 - protocol: TCP - targetPort: 13004 - 
name: port-13004 - - port: 15004 - protocol: TCP - targetPort: 15004 - name: port-15004 - - port: 16004 - protocol: TCP - targetPort: 16004 - name: port-16004 - selector: - app: ssv-node-v2-4 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-4 - name: ssv-node-v2-4 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-4 - template: - metadata: - labels: - app: ssv-node-v2-4 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-4 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12004 - name: port-12004 - protocol: UDP - hostPort: 12004 - - containerPort: 13004 - name: port-13004 - hostPort: 13004 - - containerPort: 15004 - name: port-15004 - hostPort: 15004 - - containerPort: 16004 - name: port-16004 - hostPort: 16004 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: TCP_PORT - value: "13004" - - name: UDP_PORT - value: "12004" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15004" - - name: SSV_API_PORT - value: "16004" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-4 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-4 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-4 - persistentVolumeClaim: - claimName: ssv-node-v2-4 - - name: ssv-cm-validator-options-4 - configMap: - name: ssv-cm-validator-options-4 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-5-deployment.yml b/.k8/stage/ssv-node-v2-5-deployment.yml deleted file mode 100644 index 8e0a8436a8..0000000000 --- a/.k8/stage/ssv-node-v2-5-deployment.yml +++ /dev/null @@ -1,159 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-5-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-5 -spec: - type: ClusterIP - ports: - - port: 12005 - protocol: UDP - targetPort: 12005 - name: port-12005 - - port: 13005 - protocol: TCP - targetPort: 13005 - 
name: port-13005 - - port: 15005 - protocol: TCP - targetPort: 15005 - name: port-15005 - - port: 16005 - protocol: TCP - targetPort: 16005 - name: port-16005 - selector: - app: ssv-node-v2-5 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-5 - name: ssv-node-v2-5 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-5 - template: - metadata: - labels: - app: ssv-node-v2-5 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-5 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: 4000m - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12005 - name: port-12005 - protocol: UDP - hostPort: 12005 - - containerPort: 13005 - name: port-13005 - hostPort: 13005 - - containerPort: 15005 - name: port-15005 - hostPort: 15005 - - containerPort: 16005 - name: port-16005 - hostPort: 16005 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15005" - - name: SSV_API_PORT - value: "16005" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: 
BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-5 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-5 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-5 - persistentVolumeClaim: - claimName: ssv-node-v2-5 - - name: ssv-cm-validator-options-5 - configMap: - name: ssv-cm-validator-options-5 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-6-deployment.yml b/.k8/stage/ssv-node-v2-6-deployment.yml deleted file mode 100644 index 1fddf2a098..0000000000 --- a/.k8/stage/ssv-node-v2-6-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-6-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-6 -spec: - type: ClusterIP - ports: - - port: 12006 - protocol: UDP - targetPort: 12006 - name: port-12006 - - port: 13006 - protocol: TCP - targetPort: 13006 - name: port-13006 - - port: 15006 - protocol: TCP - targetPort: 15006 - name: port-15006 - - port: 16006 - protocol: TCP - targetPort: 16006 - name: port-16006 - selector: - app: ssv-node-v2-6 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-6 - name: ssv-node-v2-6 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-6 - template: - metadata: - labels: - app: ssv-node-v2-6 - spec: - affinity: - nodeAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-6 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: 4000m - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12006 - name: port-12006 - protocol: UDP - hostPort: 12006 - - containerPort: 13006 - name: port-13006 - hostPort: 13006 - - containerPort: 15006 - name: port-15006 - hostPort: 15006 - - containerPort: 16006 - name: port-16006 - hostPort: 16006 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15006" - - name: SSV_API_PORT - value: "16006" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-6 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-6 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# 
initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-6 - persistentVolumeClaim: - claimName: ssv-node-v2-6 - - name: ssv-cm-validator-options-6 - configMap: - name: ssv-cm-validator-options-6 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-7-deployment.yml b/.k8/stage/ssv-node-v2-7-deployment.yml deleted file mode 100644 index b4a4b93e72..0000000000 --- a/.k8/stage/ssv-node-v2-7-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-7-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-7 -spec: - type: ClusterIP - ports: - - port: 12007 - protocol: UDP - targetPort: 12007 - name: port-12007 - - port: 13007 - protocol: TCP - targetPort: 13007 - name: port-13007 - - port: 15007 - protocol: TCP - targetPort: 15007 - name: port-15007 - - port: 16007 - protocol: TCP - targetPort: 16007 - name: port-16007 - selector: - app: ssv-node-v2-7 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-7 - name: ssv-node-v2-7 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-7 - template: - metadata: - labels: - app: ssv-node-v2-7 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-7 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: 4000m - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12007 - name: port-12007 - protocol: UDP - hostPort: 12007 - - containerPort: 13007 - name: port-13007 - hostPort: 13007 - - 
containerPort: 15007 - name: port-15007 - hostPort: 15007 - - containerPort: 16007 - name: port-16007 - hostPort: 16007 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15007" - - name: SSV_API_PORT - value: "16007" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-7 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-7 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-7 - persistentVolumeClaim: - claimName: ssv-node-v2-7 - - name: ssv-cm-validator-options-7 - configMap: - name: ssv-cm-validator-options-7 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v2-8-deployment.yml b/.k8/stage/ssv-node-v2-8-deployment.yml deleted file mode 100644 index 
af3607ba5c..0000000000 --- a/.k8/stage/ssv-node-v2-8-deployment.yml +++ /dev/null @@ -1,161 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v2-8-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v2-8 -spec: - type: ClusterIP - ports: - - port: 12008 - protocol: UDP - targetPort: 12008 - name: port-12008 - - port: 13008 - protocol: TCP - targetPort: 13008 - name: port-13008 - - port: 15008 - protocol: TCP - targetPort: 15008 - name: port-15008 - - port: 16008 - protocol: TCP - targetPort: 16008 - name: port-16008 - selector: - app: ssv-node-v2-8 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v2-8 - name: ssv-node-v2-8 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v2-8 - template: - metadata: - labels: - app: ssv-node-v2-8 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v2-8 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: 4000m - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12008 - name: port-12008 - protocol: UDP - hostPort: 12008 - - containerPort: 13008 - name: port-13008 - hostPort: 13008 - - containerPort: 15008 - name: port-15008 - hostPort: 15008 - - containerPort: 16008 - name: port-16008 - hostPort: 16008 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: LOG_LEVEL - value: "debug" - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: DB_REPORTING - value: "false" - - name: METRICS_API_PORT - value: "15008" - - name: SSV_API_PORT - value: "16008" - - name: ENABLE_PROFILE - value: "true" - - name: DISCOVERY_TRACE - value: 'false' - - name: PUBSUB_TRACE - value: 'false' - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v2-8 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-8 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 - volumes: - - name: ssv-node-v2-8 - persistentVolumeClaim: - claimName: ssv-node-v2-8 - - name: ssv-cm-validator-options-8 - configMap: - name: ssv-cm-validator-options-8 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-1-deployment.yml b/.k8/stage/ssv-node-v3-1-deployment.yml deleted file mode 100644 index 59eeab296a..0000000000 --- a/.k8/stage/ssv-node-v3-1-deployment.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v3-1-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v3-1 -spec: - type: ClusterIP - ports: - - port: 12301 - protocol: UDP - targetPort: 12301 - name: port-12301 - - port: 13301 - protocol: TCP - targetPort: 13301 - name: port-13301 - - port: 15301 - protocol: TCP - targetPort: 15301 - 
name: port-15301 - - port: 16301 - protocol: TCP - targetPort: 16301 - name: port-16301 - - port: 16301 - protocol: TCP - targetPort: 16301 - name: port-16301 - selector: - app: ssv-node-v3-1 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v3-1 - name: ssv-node-v3-1 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: ssv-node-v3-1 - template: - metadata: - labels: - app: ssv-node-v3-1 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v3-1 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12301 - name: port-12301 - protocol: UDP - hostPort: 12301 - - containerPort: 13301 - name: port-13301 - hostPort: 13301 - - containerPort: 15301 - name: port-15301 - hostPort: 15301 - - containerPort: 16301 - name: port-16301 - hostPort: 16301 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: LOG_LEVEL - value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: METRICS_API_PORT - value: "15301" - - name: SSV_API_PORT - value: "16301" - - name: ENABLE_PROFILE - value: "true" - - name: WS_API_PORT - value: "16301" - - name: FULLNODE - value: "true" - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v3-1 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-v3-1 - volumes: - - name: ssv-node-v3-1 - persistentVolumeClaim: - claimName: ssv-node-v3-1 - - name: ssv-cm-validator-options-v3-1 - configMap: - name: ssv-cm-validator-options-v3-1 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-2-deployment.yml b/.k8/stage/ssv-node-v3-2-deployment.yml deleted file mode 100644 index 2daed3c6a8..0000000000 --- a/.k8/stage/ssv-node-v3-2-deployment.yml +++ /dev/null @@ -1,141 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v3-2-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v3-2 -spec: - type: ClusterIP - ports: - - port: 12302 - protocol: UDP - targetPort: 12302 - name: port-12302 - - port: 13302 - protocol: TCP - targetPort: 13302 - name: port-13302 - - port: 15302 - protocol: TCP - targetPort: 15302 - name: port-15302 - - port: 16302 - protocol: TCP - targetPort: 16302 - name: port-16302 - selector: - app: ssv-node-v3-2 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v3-2 - name: ssv-node-v3-2 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - 
app: ssv-node-v3-2 - template: - metadata: - labels: - app: ssv-node-v3-2 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v3-2 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12302 - name: port-12302 - protocol: UDP - hostPort: 12302 - - containerPort: 13302 - name: port-13302 - hostPort: 13302 - - containerPort: 15302 - name: port-15302 - hostPort: 15302 - - containerPort: 16302 - name: port-16302 - hostPort: 16302 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: LOG_LEVEL - value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: METRICS_API_PORT - value: "15302" - - name: SSV_API_PORT - value: "16302" - - name: ENABLE_PROFILE - value: "true" - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v3-2 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-v3-2 - volumes: - - name: ssv-node-v3-2 - persistentVolumeClaim: - claimName: ssv-node-v3-2 - - name: ssv-cm-validator-options-v3-2 - configMap: - name: ssv-cm-validator-options-v3-2 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-3-deployment.yml b/.k8/stage/ssv-node-v3-3-deployment.yml deleted file mode 100644 index 64bfbbe759..0000000000 --- a/.k8/stage/ssv-node-v3-3-deployment.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v3-3-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v3-3 -spec: - type: ClusterIP - ports: - - port: 12303 - protocol: UDP - targetPort: 12303 - name: port-12303 - - port: 13303 - protocol: TCP - targetPort: 13303 - name: port-13303 - - port: 15303 - protocol: TCP - targetPort: 15303 - name: port-15303 - - port: 16303 - protocol: TCP - targetPort: 16303 - name: port-16303 - - port: 16303 - protocol: TCP - targetPort: 16303 - name: port-16303 - selector: - app: ssv-node-v3-3 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v3-3 - name: ssv-node-v3-3 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - app: 
ssv-node-v3-3 - template: - metadata: - labels: - app: ssv-node-v3-3 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v3-3 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12303 - name: port-12303 - protocol: UDP - hostPort: 12303 - - containerPort: 13303 - name: port-13303 - hostPort: 13303 - - containerPort: 15303 - name: port-15303 - hostPort: 15303 - - containerPort: 16303 - name: port-16303 - hostPort: 16303 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: LOG_LEVEL - value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: METRICS_API_PORT - value: "15303" - - name: SSV_API_PORT - value: "16303" - - name: ENABLE_PROFILE - value: "true" - - name: WS_API_PORT - value: "16303" - - name: FULLNODE - value: "true" - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v3-3 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-v3-3 - volumes: - - name: ssv-node-v3-3 - persistentVolumeClaim: - claimName: ssv-node-v3-3 - - name: ssv-cm-validator-options-v3-3 - configMap: - name: ssv-cm-validator-options-v3-3 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-4-deployment.yml b/.k8/stage/ssv-node-v3-4-deployment.yml deleted file mode 100644 index b13efb4f5c..0000000000 --- a/.k8/stage/ssv-node-v3-4-deployment.yml +++ /dev/null @@ -1,141 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ssv-node-v3-4-svc - namespace: REPLACE_NAMESPACE - labels: - app: ssv-node-v3-4 -spec: - type: ClusterIP - ports: - - port: 12304 - protocol: UDP - targetPort: 12304 - name: port-12304 - - port: 13304 - protocol: TCP - targetPort: 13304 - name: port-13304 - - port: 15304 - protocol: TCP - targetPort: 15304 - name: port-15304 - - port: 16304 - protocol: TCP - targetPort: 16304 - name: port-16304 - selector: - app: ssv-node-v3-4 ---- -apiVersion: REPLACE_API_VERSION -kind: Deployment -metadata: - labels: - app: ssv-node-v3-4 - name: ssv-node-v3-4 - namespace: REPLACE_NAMESPACE -spec: - replicas: REPLACE_REPLICAS - strategy: - type: Recreate - selector: - matchLabels: - 
app: ssv-node-v3-4 - template: - metadata: - labels: - app: ssv-node-v3-4 - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main - containers: - - name: ssv-node-v3-4 - image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG - imagePullPolicy: Always - resources: - limits: - cpu: REPLACE_NODES_CPU_LIMIT - memory: REPLACE_NODES_MEM_LIMIT - command: ["make", "start-node"] - ports: - - containerPort: 12304 - name: port-12304 - protocol: UDP - hostPort: 12304 - - containerPort: 13304 - name: port-13304 - hostPort: 13304 - - containerPort: 15304 - name: port-15304 - hostPort: 15304 - - containerPort: 16304 - name: port-16304 - hostPort: 16304 - env: - - name: SHARE_CONFIG - value: "./data/share.yaml" - - name: CONFIG_PATH - valueFrom: - secretKeyRef: - name: config-secrets - key: config_path - - name: ABI_VERSION - valueFrom: - secretKeyRef: - name: config-secrets - key: abi_version - optional: true - - name: DEBUG_SERVICES - value: "ssv/*." 
- - name: LOG_LEVEL - value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" - - name: DISCOVERY_TYPE_KEY - value: "discv5" - - name: CONSENSUS_TYPE - value: "validation" - - name: HOST_DNS - value: "" - - name: HOST_ADDRESS - value: "" - - name: DB_PATH - value: "./data/db-jato-v2" - - name: NETWORK - value: "jato-v2-stage" - - name: METRICS_API_PORT - value: "15304" - - name: SSV_API_PORT - value: "16304" - - name: ENABLE_PROFILE - value: "true" - - name: BUILDER_PROPOSALS - value: "true" - volumeMounts: - - mountPath: /data - name: ssv-node-v3-4 - - mountPath: /data/share.yaml - subPath: share.yaml - name: ssv-cm-validator-options-v3-4 - volumes: - - name: ssv-node-v3-4 - persistentVolumeClaim: - claimName: ssv-node-v3-4 - - name: ssv-cm-validator-options-v3-4 - configMap: - name: ssv-cm-validator-options-v3-4 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists - hostNetwork: true diff --git a/Dockerfile b/Dockerfile index ef49ad2716..44c362dcfc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,6 @@ RUN apt-get update && \ git=1:2.39.2-1.1 \ zip=3.0-13 \ unzip=6.0-28 \ - wget=1.21.3-1+b2 \ g++=4:12.2.0-3 \ gcc-aarch64-linux-gnu=4:12.2.0-3 \ bzip2=1.0.8-5+b1 \ diff --git a/Makefile b/Makefile index 9fc80cecde..62122fa53b 100644 --- a/Makefile +++ b/Makefile @@ -90,7 +90,7 @@ docker-integration-test: #Build .PHONY: build build: - CGO_ENABLED=1 go build -o ./bin/ssvnode -ldflags "-X main.Version=`git describe --tags $(git rev-list --tags --max-count=1)`" ./cmd/ssvnode/ + CGO_ENABLED=1 go build -o ./bin/ssvnode -ldflags "-X main.Commit=`git rev-parse HEAD` -X main.Branch=`git symbolic-ref --short HEAD` -X main.Version=`git describe --tags $(git rev-list --tags --max-count=1)`" ./cmd/ssvnode/ .PHONY: start-node start-node: diff --git a/beacon/goclient/goclient.go b/beacon/goclient/goclient.go index 8fe1216155..de3ed18c0d 100644 --- a/beacon/goclient/goclient.go +++ b/beacon/goclient/goclient.go 
@@ -20,7 +20,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" ) @@ -122,6 +122,7 @@ type Client interface { eth2client.BlindedBeaconBlockProposalProvider eth2client.BlindedBeaconBlockSubmitter eth2client.ValidatorRegistrationsSubmitter + eth2client.VoluntaryExitSubmitter } type NodeClientProvider interface { @@ -147,7 +148,7 @@ type goClient struct { } // New init new client and go-client instance -func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.OperatorID, slotTicker slot_ticker.Ticker) (beaconprotocol.BeaconNode, error) { +func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.OperatorID, slotTickerProvider slotticker.Provider) (beaconprotocol.BeaconNode, error) { logger.Info("consensus client: connecting", fields.Address(opt.BeaconNodeAddr), fields.Network(string(opt.Network.BeaconNetwork))) httpClient, err := http.New(opt.Context, @@ -161,9 +162,6 @@ func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.Op return nil, errors.WithMessage(err, "failed to create http client") } - tickerChan := make(chan phase0.Slot, 32) - slotTicker.Subscribe(tickerChan) - client := &goClient{ log: logger, ctx: opt.Context, @@ -190,7 +188,7 @@ func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.Op ) // Start registration submitter. 
- go client.registrationSubmitter(tickerChan) + go client.registrationSubmitter(slotTickerProvider) return client, nil } diff --git a/beacon/goclient/proposer.go b/beacon/goclient/proposer.go index cb48d5e33c..38d7f4f565 100644 --- a/beacon/goclient/proposer.go +++ b/beacon/goclient/proposer.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/slotticker" ) const ( @@ -230,9 +231,15 @@ func (gc *goClient) createValidatorRegistration(pubkey []byte, feeRecipient bell return signedReg } -func (gc *goClient) registrationSubmitter(slots <-chan phase0.Slot) { - for currentSlot := range slots { - gc.submitRegistrationsFromCache(currentSlot) +func (gc *goClient) registrationSubmitter(slotTickerProvider slotticker.Provider) { + ticker := slotTickerProvider() + for { + select { + case <-gc.ctx.Done(): + return + case <-ticker.Next(): + gc.submitRegistrationsFromCache(ticker.Slot()) + } } } diff --git a/beacon/goclient/voluntary_exit.go b/beacon/goclient/voluntary_exit.go new file mode 100644 index 0000000000..bb2dfaa62f --- /dev/null +++ b/beacon/goclient/voluntary_exit.go @@ -0,0 +1,10 @@ +package goclient + +import ( + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/pkg/errors" +) + +func (gc *goClient) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit, sig phase0.BLSSignature) error { + return errors.New("not implemented") +} diff --git a/cli/bootnode/boot_node.go b/cli/bootnode/boot_node.go index ddf69d71e4..ae9a767aa7 100644 --- a/cli/bootnode/boot_node.go +++ b/cli/bootnode/boot_node.go @@ -1,6 +1,8 @@ package bootnode import ( + "fmt" + "github.com/bloxapp/ssv/utils/commons" "log" "github.com/bloxapp/ssv/logging" @@ -27,6 +29,8 @@ var StartBootNodeCmd = &cobra.Command{ Use: "start-boot-node", Short: "Starts boot node for discovery based ENR", Run: func(cmd *cobra.Command, args []string) { + commons.SetBuildData(cmd.Parent().Short, cmd.Parent().Version) + if err := 
cleanenv.ReadConfig(globalArgs.ConfigPath, &cfg); err != nil { log.Fatal(err) } @@ -41,12 +45,15 @@ var StartBootNodeCmd = &cobra.Command{ MaxBackups: cfg.LogFileBackups, }, ) + if err != nil { log.Fatal(err) } logger := zap.L() + logger.Info(fmt.Sprintf("starting %v", commons.GetBuildData())) + bootNode, err := bootnode.New(cfg.Options) if err != nil { logger.Fatal("failed to set up boot node", zap.Error(err)) diff --git a/cli/operator/node.go b/cli/operator/node.go index 4dd14f558a..302e7d68fe 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -2,7 +2,9 @@ package operator import ( "context" + "crypto/rsa" "crypto/x509" + "encoding/base64" "fmt" "log" "math/big" @@ -19,7 +21,6 @@ import ( "github.com/bloxapp/ssv/api/handlers" apiserver "github.com/bloxapp/ssv/api/server" - "github.com/bloxapp/ssv/beacon/goclient" global_config "github.com/bloxapp/ssv/cli/config" "github.com/bloxapp/ssv/ekm" @@ -34,6 +35,7 @@ import ( ssv_identity "github.com/bloxapp/ssv/identity" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/migrations" "github.com/bloxapp/ssv/monitoring/metrics" "github.com/bloxapp/ssv/monitoring/metricsreporter" @@ -42,9 +44,11 @@ import ( "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/nodeprobe" "github.com/bloxapp/ssv/operator" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/slotticker" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validatorsmap" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -72,13 +76,10 @@ type config struct { MetricsAPIPort int `yaml:"MetricsAPIPort" env:"METRICS_API_PORT" env-description:"Port to listen on for the 
metrics API."` EnableProfile bool `yaml:"EnableProfile" env:"ENABLE_PROFILE" env-description:"flag that indicates whether go profiling tools are enabled"` NetworkPrivateKey string `yaml:"NetworkPrivateKey" env:"NETWORK_PRIVATE_KEY" env-description:"private key for network identity"` - - WsAPIPort int `yaml:"WebSocketAPIPort" env:"WS_API_PORT" env-description:"Port to listen on for the websocket API."` - WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` - - SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` - - LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` + WsAPIPort int `yaml:"WebSocketAPIPort" env:"WS_API_PORT" env-description:"Port to listen on for the websocket API."` + WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` + SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` + LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` } var cfg config @@ -92,11 +93,21 @@ var StartNodeCmd = &cobra.Command{ Use: "start-node", Short: "Starts an instance of SSV node", Run: func(cmd *cobra.Command, args []string) { - logger, err := setupGlobal(cmd) + commons.SetBuildData(cmd.Parent().Short, cmd.Parent().Version) + + logger, err := setupGlobal() if err != nil { log.Fatal("could not create logger", err) } + defer logging.CapturePanic(logger) + + logger.Info(fmt.Sprintf("starting %v", commons.GetBuildData())) + + metricsReporter := metricsreporter.New( + metricsreporter.WithLogger(logger), + ) + networkConfig, err := setupSSVNetwork(logger) if err != nil { logger.Fatal("could not setup network", zap.Error(err)) @@ -124,32 +135,20 @@ var StartNodeCmd = &cobra.Command{ cfg.P2pNetworkConfig.Ctx = cmd.Context() permissioned := func() bool { - 
currentEpoch := uint64(networkConfig.Beacon.EstimatedCurrentEpoch()) - return currentEpoch >= cfg.P2pNetworkConfig.PermissionedActivateEpoch && currentEpoch < cfg.P2pNetworkConfig.PermissionedDeactivateEpoch + currentEpoch := networkConfig.Beacon.EstimatedCurrentEpoch() + return currentEpoch < networkConfig.PermissionlessActivationEpoch } - cfg.P2pNetworkConfig.Permissioned = permissioned - cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) - cfg.P2pNetworkConfig.NodeStorage = nodeStorage - cfg.P2pNetworkConfig.OperatorID = format.OperatorID(operatorData.PublicKey) - cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode - cfg.P2pNetworkConfig.Network = networkConfig - - p2pNetwork := setupP2P(logger, db) - - slotTicker := slot_ticker.NewTicker(cmd.Context(), networkConfig) - - metricsReporter := metricsreporter.New( - metricsreporter.WithLogger(logger), - ) + slotTickerProvider := func() slotticker.SlotTicker { + return slotticker.New(networkConfig) + } cfg.ConsensusClient.Context = cmd.Context() - cfg.ConsensusClient.Graffiti = []byte("SSV.Network") cfg.ConsensusClient.GasLimit = spectypes.DefaultGasLimit cfg.ConsensusClient.Network = networkConfig.Beacon.GetNetwork() - consensusClient := setupConsensusClient(logger, operatorData.ID, slotTicker) + consensusClient := setupConsensusClient(logger, operatorData.ID, slotTickerProvider) executionClient, err := executionclient.New( cmd.Context(), @@ -166,6 +165,36 @@ var StartNodeCmd = &cobra.Command{ logger.Fatal("could not connect to execution client", zap.Error(err)) } + var validatorCtrl validator.Controller + cfg.P2pNetworkConfig.Permissioned = permissioned + cfg.P2pNetworkConfig.NodeStorage = nodeStorage + cfg.P2pNetworkConfig.OperatorPubKeyHash = format.OperatorID(operatorData.PublicKey) + cfg.P2pNetworkConfig.OperatorID = func() spectypes.OperatorID { + return validatorCtrl.GetOperatorData().ID + } + 
cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode + cfg.P2pNetworkConfig.Network = networkConfig + + validatorsMap := validatorsmap.New(cmd.Context()) + + dutyStore := dutystore.New() + cfg.SSVOptions.DutyStore = dutyStore + + messageValidator := validation.NewMessageValidator( + networkConfig, + validation.WithNodeStorage(nodeStorage), + validation.WithLogger(logger), + validation.WithMetrics(metricsReporter), + validation.WithDutyStore(dutyStore), + validation.WithOwnOperatorID(operatorData.ID), + ) + + cfg.P2pNetworkConfig.Metrics = metricsReporter + cfg.P2pNetworkConfig.MessageValidator = messageValidator + cfg.SSVOptions.ValidatorOptions.MessageValidator = messageValidator + + p2pNetwork := setupP2P(logger, db) + cfg.SSVOptions.Context = cmd.Context() cfg.SSVOptions.DB = db cfg.SSVOptions.BeaconNode = consensusClient @@ -178,6 +207,7 @@ var StartNodeCmd = &cobra.Command{ cfg.SSVOptions.ValidatorOptions.Network = p2pNetwork cfg.SSVOptions.ValidatorOptions.Beacon = consensusClient cfg.SSVOptions.ValidatorOptions.KeyManager = keyManager + cfg.SSVOptions.ValidatorOptions.ValidatorsMap = validatorsMap cfg.SSVOptions.ValidatorOptions.ShareEncryptionKeyProvider = nodeStorage.GetPrivateKey cfg.SSVOptions.ValidatorOptions.OperatorData = operatorData @@ -209,12 +239,12 @@ var StartNodeCmd = &cobra.Command{ cfg.SSVOptions.ValidatorOptions.StorageMap = storageMap cfg.SSVOptions.ValidatorOptions.Metrics = metricsReporter + cfg.SSVOptions.Metrics = metricsReporter - validatorCtrl := validator.NewController(logger, cfg.SSVOptions.ValidatorOptions) + validatorCtrl = validator.NewController(logger, cfg.SSVOptions.ValidatorOptions) cfg.SSVOptions.ValidatorController = validatorCtrl - cfg.SSVOptions.Metrics = metricsReporter - operatorNode = operator.New(logger, cfg.SSVOptions, slotTicker) + operatorNode = operator.New(logger, cfg.SSVOptions, slotTickerProvider) if cfg.MetricsAPIPort > 0 { go startMetricsHandler(cmd.Context(), logger, db, metricsReporter, 
cfg.MetricsAPIPort, cfg.EnableProfile) @@ -319,10 +349,7 @@ func init() { global_config.ProcessArgs(&cfg, &globalArgs, StartNodeCmd) } -func setupGlobal(cmd *cobra.Command) (*zap.Logger, error) { - commons.SetBuildData(cmd.Parent().Short, cmd.Parent().Version) - log.Printf("starting SSV node (version %s)", commons.GetBuildData()) - +func setupGlobal() (*zap.Logger, error) { if globalArgs.ConfigPath != "" { if err := cleanenv.ReadConfig(globalArgs.ConfigPath, &cfg); err != nil { return nil, fmt.Errorf("could not read config: %w", err) @@ -424,6 +451,11 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database) (operatorstora cfg.OperatorPrivateKey = rsaencryption.ExtractPrivateKey(privateKey) } + cfg.P2pNetworkConfig.OperatorPrivateKey, err = decodePrivateKey(cfg.OperatorPrivateKey) + if err != nil { + logger.Fatal("could not decode operator private key", zap.Error(err)) + } + operatorPubKey, err := nodeStorage.SetupPrivateKey(cfg.OperatorPrivateKey) if err != nil { logger.Fatal("could not setup operator private key", zap.Error(err)) @@ -447,6 +479,20 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database) (operatorstora return nodeStorage, operatorData } +func decodePrivateKey(key string) (*rsa.PrivateKey, error) { + operatorKeyByte, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return nil, err + } + + sk, err := rsaencryption.ConvertPemToPrivateKey(string(operatorKeyByte)) + if err != nil { + return nil, err + } + + return sk, err +} + func setupSSVNetwork(logger *zap.Logger) (networkconfig.NetworkConfig, error) { networkConfig, err := networkconfig.GetNetworkConfigByName(cfg.SSVOptions.NetworkName) if err != nil { @@ -477,10 +523,7 @@ func setupSSVNetwork(logger *zap.Logger) (networkconfig.NetworkConfig, error) { return networkConfig, nil } -func setupP2P( - logger *zap.Logger, - db basedb.Database, -) network.P2PNetwork { +func setupP2P(logger *zap.Logger, db basedb.Database) network.P2PNetwork { istore := 
ssv_identity.NewIdentityStore(db) netPrivKey, err := istore.SetupNetworkKey(logger, cfg.NetworkPrivateKey) if err != nil { @@ -494,9 +537,9 @@ func setupP2P( func setupConsensusClient( logger *zap.Logger, operatorID spectypes.OperatorID, - slotTicker slot_ticker.Ticker, + slotTickerProvider slotticker.Provider, ) beaconprotocol.BeaconNode { - cl, err := goclient.New(logger, cfg.ConsensusClient, operatorID, slotTicker) + cl, err := goclient.New(logger, cfg.ConsensusClient, operatorID, slotTickerProvider) if err != nil { logger.Fatal("failed to create beacon go-client", zap.Error(err), fields.Address(cfg.ConsensusClient.BeaconNodeAddr)) diff --git a/cmd/ssvnode/main.go b/cmd/ssvnode/main.go index 21e68f5e19..621dc8b83c 100644 --- a/cmd/ssvnode/main.go +++ b/cmd/ssvnode/main.go @@ -1,17 +1,23 @@ package main import ( + "fmt" "github.com/bloxapp/ssv/cli" ) -var ( - // AppName is the application name - AppName = "SSV-Node" +// AppName is the application name +var AppName = "SSV-Node" - // Version is the app version - Version = "latest" -) +// Version is the app version +var Version = "latest" + +// Branch is the git branch this version was built on +var Branch = "main" + +// Commit is the git commit this version was built on +var Commit = "unknown" func main() { - cli.Execute(AppName, Version) + version := fmt.Sprintf("%s-%s-%s", Version, Branch, Commit) + cli.Execute(AppName, version) } diff --git a/docs/OPERATOR_GETTING_STARTED.md b/docs/OPERATOR_GETTING_STARTED.md index f46fdd08b1..d99c30ae52 100644 --- a/docs/OPERATOR_GETTING_STARTED.md +++ b/docs/OPERATOR_GETTING_STARTED.md @@ -148,7 +148,8 @@ OperatorPrivateKey: LS0tLS... ### 6. Start SSV Node in Docker -Run the docker image in the same folder you created the `config.yaml`: +Before start, make sure the clock is synced with NTP servers. 
+Then, run the docker image in the same folder you created the `config.yaml`: ```shell $ docker run -d --restart unless-stopped --name=ssv_node -e CONFIG_PATH=./config.yaml -p 13001:13001 -p 12001:12001/udp -v $(pwd)/config.yaml:/config.yaml -v $(pwd):/data --log-opt max-size=500m --log-opt max-file=10 -it 'bloxstaking/ssv-node:latest' make BUILD_PATH=/go/bin/ssvnode start-node \ diff --git a/ekm/eth_key_manager_signer.go b/ekm/eth_key_manager_signer.go index 21c663a5b1..6d4f098e00 100644 --- a/ekm/eth_key_manager_signer.go +++ b/ekm/eth_key_manager_signer.go @@ -29,9 +29,16 @@ import ( "github.com/bloxapp/ssv/storage/basedb" ) -// minimal att&block epoch/slot distance to protect slashing -var minimalAttSlashingProtectionEpochDistance = phase0.Epoch(0) -var minimalBlockSlashingProtectionSlotDistance = phase0.Slot(0) +const ( + // minSPAttestationEpochGap is the minimum epoch distance used for slashing protection in attestations. + // It defines the smallest allowable gap between the source and target epochs in an existing attestation + // and those in a new attestation, helping to prevent slashable offenses. + minSPAttestationEpochGap = phase0.Epoch(0) + // minSPProposalSlotGap is the minimum slot distance used for slashing protection in block proposals. + // It defines the smallest allowable gap between the current slot and the slot of a new block proposal, + // helping to prevent slashable offenses. + minSPProposalSlotGap = phase0.Slot(0) +) type ethKeyManagerSigner struct { wallet core.Wallet @@ -43,9 +50,17 @@ type ethKeyManagerSigner struct { builderProposals bool } +// StorageProvider provides the underlying KeyManager storage. 
+type StorageProvider interface { + ListAccounts() ([]core.ValidatorAccount, error) + RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) + RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) + BumpSlashingProtection(pubKey []byte) error +} + // NewETHKeyManagerSigner returns a new instance of ethKeyManagerSigner func NewETHKeyManagerSigner(logger *zap.Logger, db basedb.Database, network networkconfig.NetworkConfig, builderProposals bool, encryptionKey string) (spectypes.KeyManager, error) { - signerStore := NewSignerStorage(db, network.Beacon.GetNetwork(), logger) + signerStore := NewSignerStorage(db, network.Beacon, logger) if encryptionKey != "" { err := signerStore.SetEncryptionKey(encryptionKey) if err != nil { @@ -85,6 +100,18 @@ func NewETHKeyManagerSigner(logger *zap.Logger, db basedb.Database, network netw }, nil } +func (km *ethKeyManagerSigner) ListAccounts() ([]core.ValidatorAccount, error) { + return km.storage.ListAccounts() +} + +func (km *ethKeyManagerSigner) RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) { + return km.storage.RetrieveHighestAttestation(pubKey) +} + +func (km *ethKeyManagerSigner) RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) { + return km.storage.RetrieveHighestProposal(pubKey) +} + func (km *ethKeyManagerSigner) SignBeaconObject(obj ssz.HashRoot, domain phase0.Domain, pk []byte, domainType phase0.DomainType) (spectypes.Signature, [32]byte, error) { sig, rootSlice, err := km.signBeaconObject(obj, domain, pk, domainType) if err != nil { @@ -260,9 +287,8 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return errors.Wrap(err, "could not check share existence") } if acc == nil { - currentSlot := km.storage.Network().EstimatedCurrentSlot() - if err := km.saveMinimalSlashingProtection(shareKey.GetPublicKey().Serialize(), currentSlot); err != nil { - return errors.Wrap(err, "could not save minimal slashing 
protection") + if err := km.BumpSlashingProtection(shareKey.GetPublicKey().Serialize()); err != nil { + return errors.Wrap(err, "could not bump slashing protection") } if err := km.saveShare(shareKey); err != nil { return errors.Wrap(err, "could not save share") @@ -272,23 +298,6 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return nil } -func (km *ethKeyManagerSigner) saveMinimalSlashingProtection(pk []byte, currentSlot phase0.Slot) error { - currentEpoch := km.storage.Network().EstimatedEpochAtSlot(currentSlot) - highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance - highestSource := highestTarget - 1 - highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance - - minAttData := minimalAttProtectionData(highestSource, highestTarget) - - if err := km.storage.SaveHighestAttestation(pk, minAttData); err != nil { - return errors.Wrapf(err, "could not save minimal highest attestation for %s", string(pk)) - } - if err := km.storage.SaveHighestProposal(pk, highestProposal); err != nil { - return errors.Wrapf(err, "could not save minimal highest proposal for %s", string(pk)) - } - return nil -} - func (km *ethKeyManagerSigner) RemoveShare(pubKey string) error { km.walletLock.Lock() defer km.walletLock.Unlock() @@ -315,28 +324,110 @@ func (km *ethKeyManagerSigner) RemoveShare(pubKey string) error { return nil } -func (km *ethKeyManagerSigner) saveShare(shareKey *bls.SecretKey) error { - key, err := core.NewHDKeyFromPrivateKey(shareKey.Serialize(), "") +// BumpSlashingProtection updates the slashing protection data for a given public key. +func (km *ethKeyManagerSigner) BumpSlashingProtection(pubKey []byte) error { + currentSlot := km.storage.BeaconNetwork().EstimatedCurrentSlot() + + // Update highest attestation data for slashing protection. + if err := km.updateHighestAttestation(pubKey, currentSlot); err != nil { + return err + } + + // Update highest proposal data for slashing protection. 
+ if err := km.updateHighestProposal(pubKey, currentSlot); err != nil { + return err + } + + return nil +} + +// updateHighestAttestation updates the highest attestation data for slashing protection. +func (km *ethKeyManagerSigner) updateHighestAttestation(pubKey []byte, slot phase0.Slot) error { + // Retrieve the highest attestation data stored for the given public key. + retrievedHighAtt, found, err := km.RetrieveHighestAttestation(pubKey) if err != nil { - return errors.Wrap(err, "could not generate HDKey") + return fmt.Errorf("could not retrieve highest attestation: %w", err) } - account := wallets.NewValidatorAccount("", key, nil, "", nil) - if err := km.wallet.AddValidatorAccount(account); err != nil { - return errors.Wrap(err, "could not save new account") + + currentEpoch := km.storage.BeaconNetwork().EstimatedEpochAtSlot(slot) + minimalSP := km.computeMinimalAttestationSP(currentEpoch) + + // Check if the retrieved highest attestation data is valid and not outdated. + if found && retrievedHighAtt != nil { + if retrievedHighAtt.Source.Epoch >= minimalSP.Source.Epoch || retrievedHighAtt.Target.Epoch >= minimalSP.Target.Epoch { + return nil + } } + + // At this point, either the retrieved attestation data was not found, or it was outdated. + // In either case, we update it to the minimal slashing protection data. + if err := km.storage.SaveHighestAttestation(pubKey, minimalSP); err != nil { + return fmt.Errorf("could not save highest attestation: %w", err) + } + + return nil +} + +// updateHighestProposal updates the highest proposal slot for slashing protection. +func (km *ethKeyManagerSigner) updateHighestProposal(pubKey []byte, slot phase0.Slot) error { + // Retrieve the highest proposal slot stored for the given public key. 
+ retrievedHighProp, found, err := km.RetrieveHighestProposal(pubKey) + if err != nil { + return fmt.Errorf("could not retrieve highest proposal: %w", err) + } + + minimalSPSlot := km.computeMinimalProposerSP(slot) + + // Check if the retrieved highest proposal slot is valid and not outdated. + if found && retrievedHighProp != 0 { + if retrievedHighProp >= minimalSPSlot { + return nil + } + } + + // At this point, either the retrieved proposal slot was not found, or it was outdated. + // In either case, we update it to the minimal slashing protection slot. + if err := km.storage.SaveHighestProposal(pubKey, minimalSPSlot); err != nil { + return fmt.Errorf("could not save highest proposal: %w", err) + } + return nil } -func minimalAttProtectionData(source, target phase0.Epoch) *phase0.AttestationData { +// computeMinimalAttestationSP calculates the minimal safe attestation data for slashing protection. +// It takes the current epoch as an argument and returns an AttestationData object with the minimal safe source and target epochs. +func (km *ethKeyManagerSigner) computeMinimalAttestationSP(epoch phase0.Epoch) *phase0.AttestationData { + // Calculate the highest safe target epoch based on the current epoch and a predefined minimum distance. + highestTarget := epoch + minSPAttestationEpochGap + // The highest safe source epoch is one less than the highest target epoch. + highestSource := highestTarget - 1 + + // Return a new AttestationData object with the calculated source and target epochs. return &phase0.AttestationData{ - BeaconBlockRoot: [32]byte{}, Source: &phase0.Checkpoint{ - Epoch: source, - Root: [32]byte{}, + Epoch: highestSource, }, Target: &phase0.Checkpoint{ - Epoch: target, - Root: [32]byte{}, + Epoch: highestTarget, }, } } + +// computeMinimalProposerSP calculates the minimal safe slot for a block proposal to avoid slashing. +// It takes the current slot as an argument and returns the minimal safe slot. 
+func (km *ethKeyManagerSigner) computeMinimalProposerSP(slot phase0.Slot) phase0.Slot { + // Calculate the highest safe proposal slot based on the current slot and a predefined minimum distance. + return slot + minSPProposalSlotGap +} + +func (km *ethKeyManagerSigner) saveShare(shareKey *bls.SecretKey) error { + key, err := core.NewHDKeyFromPrivateKey(shareKey.Serialize(), "") + if err != nil { + return errors.Wrap(err, "could not generate HDKey") + } + account := wallets.NewValidatorAccount("", key, nil, "", nil) + if err := km.wallet.AddValidatorAccount(account); err != nil { + return errors.Wrap(err, "could not save new account") + } + return nil +} diff --git a/ekm/signer_key_manager_test.go b/ekm/signer_key_manager_test.go index 4efe2c4fb3..65cf5df24c 100644 --- a/ekm/signer_key_manager_test.go +++ b/ekm/signer_key_manager_test.go @@ -7,26 +7,25 @@ import ( "encoding/hex" "testing" - "github.com/bloxapp/eth2-key-manager/core" - "github.com/bloxapp/eth2-key-manager/wallets/hd" - "github.com/bloxapp/ssv/utils/rsaencryption" - - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/storage/basedb" - "github.com/attestantio/go-eth2-client/spec/altair" "github.com/attestantio/go-eth2-client/spec/bellatrix" "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/bloxapp/eth2-key-manager/core" + "github.com/bloxapp/eth2-key-manager/wallets/hd" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/utils/threshold" "github.com/herumi/bls-eth-go-binary/bls" + "github.com/pkg/errors" "github.com/prysmaticlabs/go-bitfield" "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/utils" + "github.com/bloxapp/ssv/utils/rsaencryption" + 
"github.com/bloxapp/ssv/utils/threshold" ) const ( @@ -36,7 +35,7 @@ const ( pk2Str = "8796fafa576051372030a75c41caafea149e4368aebaca21c9f90d9974b3973d5cee7d7874e4ec9ec59fb2c8945b3e01" ) -func testKeyManager(t *testing.T) spectypes.KeyManager { +func testKeyManager(t *testing.T, network *networkconfig.NetworkConfig) spectypes.KeyManager { threshold.Init() logger := logging.TestLogger(t) @@ -44,7 +43,14 @@ func testKeyManager(t *testing.T) spectypes.KeyManager { db, err := getBaseStorage(logger) require.NoError(t, err) - km, err := NewETHKeyManagerSigner(logger, db, networkconfig.TestNetwork, true, "") + if network == nil { + network = &networkconfig.NetworkConfig{ + Beacon: utils.SetupMockBeaconNetwork(t, nil), + Domain: networkconfig.TestNetwork.Domain, + } + } + + km, err := NewETHKeyManagerSigner(logger, db, *network, true, "") require.NoError(t, err) sk1 := &bls.SecretKey{} @@ -120,7 +126,7 @@ func TestEncryptedKeyManager(t *testing.T) { } func TestSlashing(t *testing.T) { - km := testKeyManager(t) + km := testKeyManager(t, nil) sk1 := &bls.SecretKey{} require.NoError(t, sk1.SetHexString(sk1Str)) @@ -129,12 +135,12 @@ func TestSlashing(t *testing.T) { currentSlot := km.(*ethKeyManagerSigner).storage.Network().EstimatedCurrentSlot() currentEpoch := km.(*ethKeyManagerSigner).storage.Network().EstimatedEpochAtSlot(currentSlot) - highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance + 1 + highestTarget := currentEpoch + minSPAttestationEpochGap + 1 highestSource := highestTarget - 1 - highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance + 1 + highestProposal := currentSlot + minSPProposalSlotGap + 1 attestationData := &phase0.AttestationData{ - Slot: 30, + Slot: currentSlot, Index: 1, BeaconBlockRoot: [32]byte{1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2}, Source: &phase0.Checkpoint{ @@ -272,7 +278,7 @@ func TestSlashing(t *testing.T) { } func TestSlashing_Attestation(t 
*testing.T) { - km := testKeyManager(t) + km := testKeyManager(t, nil) var secretKeys [4]*bls.SecretKey for i := range secretKeys { @@ -280,8 +286,7 @@ func TestSlashing_Attestation(t *testing.T) { secretKeys[i].SetByCSPRNG() // Equivalent to AddShare but with a custom slot for minimal slashing protection. - minimalSlot := phase0.Slot(64) - err := km.(*ethKeyManagerSigner).saveMinimalSlashingProtection(secretKeys[i].GetPublicKey().Serialize(), minimalSlot) + err := km.(*ethKeyManagerSigner).BumpSlashingProtection(secretKeys[i].GetPublicKey().Serialize()) require.NoError(t, err) err = km.(*ethKeyManagerSigner).saveShare(secretKeys[i]) require.NoError(t, err) @@ -317,6 +322,12 @@ func TestSlashing_Attestation(t *testing.T) { require.NoError(t, err, "expected no slashing") require.NotZero(t, sig, "expected non-zero signature") require.NotZero(t, root, "expected non-zero root") + + highAtt, found, err := km.(*ethKeyManagerSigner).storage.RetrieveHighestAttestation(sk.GetPublicKey().Serialize()) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, attestation.Source.Epoch, highAtt.Source.Epoch) + require.Equal(t, attestation.Target.Epoch, highAtt.Target.Epoch) } } @@ -360,7 +371,7 @@ func TestSlashing_Attestation(t *testing.T) { func TestSignRoot(t *testing.T) { require.NoError(t, bls.Init(bls.BLS12_381)) - km := testKeyManager(t) + km := testKeyManager(t, nil) t.Run("pk 1", func(t *testing.T) { pk := &bls.PublicKey{} diff --git a/ekm/signer_storage.go b/ekm/signer_storage.go index 5991e6f321..fc8eadd62e 100644 --- a/ekm/signer_storage.go +++ b/ekm/signer_storage.go @@ -47,17 +47,19 @@ type Storage interface { SetEncryptionKey(newKey string) error ListAccountsTxn(r basedb.Reader) ([]core.ValidatorAccount, error) SaveAccountTxn(rw basedb.ReadWriter, account core.ValidatorAccount) error + + BeaconNetwork() beacon.BeaconNetwork } type storage struct { db basedb.Database - network beacon.Network + network beacon.BeaconNetwork encryptionKey []byte logger 
*zap.Logger // struct logger is used because core.Storage does not support passing a logger lock sync.RWMutex } -func NewSignerStorage(db basedb.Database, network beacon.Network, logger *zap.Logger) Storage { +func NewSignerStorage(db basedb.Database, network beacon.BeaconNetwork, logger *zap.Logger) Storage { return &storage{ db: db, network: network, @@ -87,7 +89,7 @@ func (s *storage) DropRegistryData() error { } func (s *storage) objPrefix(obj string) []byte { - return []byte(string(s.network.BeaconNetwork) + obj) + return []byte(string(s.network.GetBeaconNetwork()) + obj) } // Name returns storage name. @@ -97,7 +99,7 @@ func (s *storage) Name() string { // Network returns the network storage is related to. func (s *storage) Network() core.Network { - return core.Network(s.network.BeaconNetwork) + return core.Network(s.network.GetBeaconNetwork()) } // SaveWallet stores the given wallet. @@ -406,3 +408,7 @@ func (s *storage) decrypt(data []byte) ([]byte, error) { nonce, ciphertext := data[:nonceSize], data[nonceSize:] return gcm.Open(nil, nonce, ciphertext, nil) } + +func (s *storage) BeaconNetwork() beacon.BeaconNetwork { + return s.network +} diff --git a/eth/ethtest/cluster_liquidated_test.go b/eth/ethtest/cluster_liquidated_test.go new file mode 100644 index 0000000000..46ae795cef --- /dev/null +++ b/eth/ethtest/cluster_liquidated_test.go @@ -0,0 +1,91 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testClusterLiquidatedInput struct { + *CommonTestInput + events []*ClusterLiquidatedEventInput +} + +func NewTestClusterLiquidatedInput(common *CommonTestInput) *testClusterLiquidatedInput { + return &testClusterLiquidatedInput{common, nil} +} + +func (input *testClusterLiquidatedInput) validate() error { + if input.CommonTestInput == nil { + return 
fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type ClusterLiquidatedEventInput struct { + auth *bind.TransactOpts + ownerAddress *ethcommon.Address + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *ClusterLiquidatedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.ownerAddress == nil: + return fmt.Errorf("validation error: input.ownerAddress is empty") + case input.cluster == nil: + return fmt.Errorf("validation error: input.cluster is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +func (input *testClusterLiquidatedInput) prepare( + eventsToDo []*ClusterLiquidatedEventInput, +) { + input.events = eventsToDo +} + +func (input *testClusterLiquidatedInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.Liquidate( + event.auth, + *event.ownerAddress, + event.opsIds, + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/cluster_reactivated_test.go b/eth/ethtest/cluster_reactivated_test.go new file mode 100644 index 0000000000..664625f44b --- /dev/null +++ b/eth/ethtest/cluster_reactivated_test.go @@ -0,0 +1,87 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + 
"github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testClusterReactivatedInput struct { + *CommonTestInput + events []*ClusterReactivatedEventInput +} + +func NewTestClusterReactivatedInput(common *CommonTestInput) *testClusterReactivatedInput { + return &testClusterReactivatedInput{common, nil} +} + +func (input *testClusterReactivatedInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type ClusterReactivatedEventInput struct { + auth *bind.TransactOpts + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *ClusterReactivatedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.cluster == nil: + return fmt.Errorf("validation error: input.cluster is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +func (input *testClusterReactivatedInput) prepare( + eventsToDo []*ClusterReactivatedEventInput, +) { + input.events = eventsToDo +} + +func (input *testClusterReactivatedInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.Reactivate( + event.auth, + event.opsIds, + big.NewInt(100_000_000), + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/common_test.go b/eth/ethtest/common_test.go new file mode 100644 
index 0000000000..44105dee65 --- /dev/null +++ b/eth/ethtest/common_test.go @@ -0,0 +1,231 @@ +package ethtest + +import ( + "context" + "fmt" + "math/big" + "net/http/httptest" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" + "github.com/golang/mock/gomock" + "go.uber.org/zap/zaptest" + + "github.com/bloxapp/ssv/eth/eventsyncer" + "github.com/bloxapp/ssv/eth/executionclient" + "github.com/bloxapp/ssv/eth/simulator" + "github.com/bloxapp/ssv/eth/simulator/simcontract" + "github.com/bloxapp/ssv/monitoring/metricsreporter" + "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator/mocks" +) + +type CommonTestInput struct { + t *testing.T + sim *simulator.SimulatedBackend + boundContract *simcontract.Simcontract + blockNum *uint64 + nodeStorage storage.Storage + doInOneBlock bool +} + +func NewCommonTestInput( + t *testing.T, + sim *simulator.SimulatedBackend, + boundContract *simcontract.Simcontract, + blockNum *uint64, + nodeStorage storage.Storage, + doInOneBlock bool, +) *CommonTestInput { + return &CommonTestInput{ + t: t, + sim: sim, + boundContract: boundContract, + blockNum: blockNum, + nodeStorage: nodeStorage, + doInOneBlock: doInOneBlock, + } +} + +type TestEnv struct { + eventSyncer *eventsyncer.EventSyncer + validators []*testValidatorData + ops []*testOperator + nodeStorage storage.Storage + sim *simulator.SimulatedBackend + boundContract *simcontract.Simcontract + auth *bind.TransactOpts + shares [][]byte + execClient *executionclient.ExecutionClient + rpcServer *rpc.Server + httpSrv *httptest.Server + validatorCtrl *mocks.MockController + mockCtrl *gomock.Controller + followDistance *uint64 +} + +func (e *TestEnv) shutdown() { + if e.mockCtrl != nil { + e.mockCtrl.Finish() + } + + if e.httpSrv != nil { + e.httpSrv.Close() + } + + if e.execClient != 
nil { + // Always returns nil error + _ = e.execClient.Close() + } +} + +func (e *TestEnv) setup( + t *testing.T, + ctx context.Context, + testAddresses []*ethcommon.Address, + validatorsCount uint64, + operatorsCount uint64, +) error { + if e.followDistance == nil { + e.SetDefaultFollowDistance() + } + logger := zaptest.NewLogger(t) + + // Create operators RSA keys + ops, err := createOperators(operatorsCount, 0) + if err != nil { + return err + } + + validators := make([]*testValidatorData, validatorsCount) + shares := make([][]byte, validatorsCount) + + // Create validators, BLS keys, shares + for i := 0; i < int(validatorsCount); i++ { + validators[i], err = createNewValidator(ops) + if err != nil { + return err + } + + shares[i], err = generateSharesData(validators[i], ops, testAddrAlice, i) + if err != nil { + return err + } + } + + eh, validatorCtrl, mockCtrl, nodeStorage, err := setupEventHandler(t, ctx, logger, ops[0], &testAddrAlice, true) + e.mockCtrl = mockCtrl + e.nodeStorage = nodeStorage + + if err != nil { + return err + } + if validatorCtrl == nil { + return fmt.Errorf("validatorCtrl is empty") + } + + // Adding testAddresses to the genesis block mostly to specify some balances for them + sim := simTestBackend(testAddresses) + + // Create JSON-RPC handler + rpcServer, err := sim.Node.RPCHandler() + e.rpcServer = rpcServer + if err != nil { + return fmt.Errorf("can't create RPC server: %w", err) + } + // Expose handler on a test server with ws open + httpSrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) + e.httpSrv = httpSrv + + addr := "ws:" + strings.TrimPrefix(httpSrv.URL, "http:") + + parsed, err := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) + if err != nil { + return fmt.Errorf("can't parse contract ABI: %w", err) + } + + auth, err := bind.NewKeyedTransactorWithChainID(testKeyAlice, big.NewInt(1337)) + if err != nil { + return err + } + + contractAddr, _, _, err := bind.DeployContract(auth, parsed, 
ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim) + if err != nil { + return fmt.Errorf("deploy contract: %w", err) + } + + sim.Commit() + + // Check contract code at the simulated blockchain + contractCode, err := sim.CodeAt(ctx, contractAddr, nil) + if err != nil { + return fmt.Errorf("get contract code: %w", err) + } + if len(contractCode) == 0 { + return fmt.Errorf("contractCode is empty") + } + + // Create a client and connect to the simulator + e.execClient, err = executionclient.New( + ctx, + addr, + contractAddr, + executionclient.WithLogger(logger), + executionclient.WithFollowDistance(*e.followDistance), + ) + if err != nil { + return err + } + + err = e.execClient.Healthy(ctx) + if err != nil { + return err + } + + e.boundContract, err = simcontract.NewSimcontract(contractAddr, sim) + if err != nil { + return err + } + + metricsReporter := metricsreporter.New( + metricsreporter.WithLogger(logger), + ) + + e.eventSyncer = eventsyncer.New( + nodeStorage, + e.execClient, + eh, + eventsyncer.WithLogger(logger), + eventsyncer.WithMetrics(metricsReporter), + ) + + e.validatorCtrl = validatorCtrl + e.sim = sim + e.auth = auth + e.validators = validators + e.ops = ops + e.shares = shares + + return nil +} + +func (e *TestEnv) SetDefaultFollowDistance() { + // 8 is current production offset + value := uint64(8) + e.followDistance = &value +} + +func (e *TestEnv) CloseFollowDistance(blockNum *uint64) { + for i := uint64(0); i < *e.followDistance; i++ { + commitBlock(e.sim, blockNum) + } +} + +func commitBlock(sim *simulator.SimulatedBackend, blockNum *uint64) { + sim.Commit() + *blockNum++ +} diff --git a/eth/ethtest/eth_e2e_test.go b/eth/ethtest/eth_e2e_test.go new file mode 100644 index 0000000000..b38dd8ea3d --- /dev/null +++ b/eth/ethtest/eth_e2e_test.go @@ -0,0 +1,309 @@ +package ethtest + +import ( + "context" + "fmt" + "math/big" + "testing" + "time" + + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" 
+ "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + registrystorage "github.com/bloxapp/ssv/registry/storage" +) + +var ( + testKeyAlice, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + testKeyBob, _ = crypto.HexToECDSA("42e14d227125f411d6d3285bb4a2e07c2dba2e210bd2f3f4e2a36633bd61bfe6") + + testAddrAlice = crypto.PubkeyToAddress(testKeyAlice.PublicKey) + testAddrBob = crypto.PubkeyToAddress(testKeyBob.PublicKey) +) + +// E2E tests for ETH package +func TestEthExecLayer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testAddresses := make([]*ethcommon.Address, 2) + testAddresses[0] = &testAddrAlice + testAddresses[1] = &testAddrBob + + cluster := &simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + } + + expectedNonce := registrystorage.Nonce(0) + + testEnv := TestEnv{} + testEnv.SetDefaultFollowDistance() + + defer testEnv.shutdown() + err := testEnv.setup(t, ctx, testAddresses, 7, 4) + require.NoError(t, err) + + var ( + auth = testEnv.auth + nodeStorage = testEnv.nodeStorage + sim = testEnv.sim + boundContract = testEnv.boundContract + ops = testEnv.ops + validators = testEnv.validators + eventSyncer = testEnv.eventSyncer + shares = testEnv.shares + validatorCtrl = testEnv.validatorCtrl + ) + + blockNum := uint64(0x1) + lastHandledBlockNum := uint64(0x1) + + common := NewCommonTestInput(t, sim, boundContract, &blockNum, nodeStorage, true) + // Prepare blocks with events + // Check that the state is empty before the test + // Check SyncHistory doesn't execute any tasks -> doesn't run any of Controller methods + // Check the node storage for existing of operators and a validator + t.Run("SyncHistory happy flow", func(t *testing.T) { + // BLOCK 2. 
produce OPERATOR ADDED + // Check that there are no registered operators + { + operators, err := nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, 0, len(operators)) + + opAddedInput := NewOperatorAddedEventInput(common) + opAddedInput.prepare(ops, auth) + opAddedInput.produce() + + testEnv.CloseFollowDistance(&blockNum) + } + + // BLOCK 3: VALIDATOR ADDED: + // Check that there were no operations for Alice Validator + { + nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice) + require.NoError(t, err) + require.Equal(t, expectedNonce, nonce) + + valAddInput := NewTestValidatorRegisteredInput(common) + valAddInput.prepare(validators, shares, ops, auth, &expectedNonce, []uint32{0, 1}) + valAddInput.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Run SyncHistory + lastHandledBlockNum, err = eventSyncer.SyncHistory(ctx, lastHandledBlockNum) + require.NoError(t, err) + + //check all the events were handled correctly and block number was increased + require.Equal(t, blockNum-*testEnv.followDistance, lastHandledBlockNum) + fmt.Println("lastHandledBlockNum", lastHandledBlockNum) + + // Check that operators were successfully registered + operators, err := nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + + // Check that validator was registered + shares := nodeStorage.Shares().List(nil) + require.Equal(t, len(valAddInput.events), len(shares)) + + // Check the nonce was bumped + nonce, err = nodeStorage.GetNextNonce(nil, testAddrAlice) + require.NoError(t, err) + require.Equal(t, expectedNonce, nonce) + } + }) + + // Main difference between "online" events handling and syncing the historical (old) events + // is that here we have to check that the controller was triggered + t.Run("SyncOngoing happy flow", func(t *testing.T) { + go func() { + err = eventSyncer.SyncOngoing(ctx, lastHandledBlockNum+1) + require.NoError(t, err) + }() + + stopChan := make(chan struct{}) + 
go func() { + for { + select { + case <-ctx.Done(): + return + case <-stopChan: + return + default: + time.Sleep(100 * time.Millisecond) + } + } + }() + + // Step 1: Add more validators + { + validatorCtrl.EXPECT().StartValidator(gomock.Any()).AnyTimes() + + // Check current nonce before start + nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice) + require.NoError(t, err) + require.Equal(t, expectedNonce, nonce) + + valAddInput := NewTestValidatorRegisteredInput(common) + valAddInput.prepare(validators, shares, ops, auth, &expectedNonce, []uint32{2, 3, 4, 5, 6}) + valAddInput.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 5000) + + nonce, err = nodeStorage.GetNextNonce(nil, testAddrAlice) + require.NoError(t, err) + require.Equal(t, expectedNonce, nonce) + + // Not sure does this make sense + require.Equal(t, uint64(testEnv.sim.Blockchain.CurrentBlock().Number.Int64()), *common.blockNum) + } + + // Step 2: remove validator + { + validatorCtrl.EXPECT().StopValidator(gomock.Any()).AnyTimes() + + shares := nodeStorage.Shares().List(nil) + require.Equal(t, 7, len(shares)) + + valRemove := NewTestValidatorRemovedEventsInput(common) + valRemove.prepare( + validators, + []uint64{0, 1}, + []uint64{1, 2, 3, 4}, + auth, + cluster, + ) + valRemove.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 500) + + shares = nodeStorage.Shares().List(nil) + require.Equal(t, 5, len(shares)) + + for _, event := range valRemove.events { + valPubKey := event.validator.masterPubKey.Serialize() + valShare := nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + } + } + + // Step 3 Liquidate Cluster + { + validatorCtrl.EXPECT().LiquidateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + clusterLiquidate := NewTestClusterLiquidatedInput(common) + clusterLiquidate.prepare([]*ClusterLiquidatedEventInput{ + { + auth: 
auth, + ownerAddress: &testAddrAlice, + opsIds: []uint64{1, 2, 3, 4}, + cluster: cluster, + }, + }) + clusterLiquidate.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + clusterID, err := ssvtypes.ComputeClusterIDHash(testAddrAlice.Bytes(), []uint64{1, 2, 3, 4}) + require.NoError(t, err) + + shares := nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.True(t, s.Liquidated) + } + } + + // Step 4 Reactivate Cluster + { + validatorCtrl.EXPECT().ReactivateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + clusterID, err := ssvtypes.ComputeClusterIDHash(testAddrAlice.Bytes(), []uint64{1, 2, 3, 4}) + require.NoError(t, err) + + shares := nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.True(t, s.Liquidated) + } + + // Trigger the event + clusterReactivated := NewTestClusterReactivatedInput(common) + clusterReactivated.prepare([]*ClusterReactivatedEventInput{ + { + auth: auth, + opsIds: []uint64{1, 2, 3, 4}, + cluster: cluster, + }, + }) + clusterReactivated.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + shares = nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID)) + require.NotEmpty(t, shares) + require.Equal(t, 5, len(shares)) + + for _, s := range shares { + require.False(t, s.Liquidated) + } + } + + // Step 5 Remove some Operators + { + operators, err := nodeStorage.ListOperators(nil, 0, 10) + require.NoError(t, err) + require.Equal(t, 4, len(operators)) + + opRemoved := NewOperatorRemovedEventInput(common) + opRemoved.prepare([]uint64{1, 2}, auth) + opRemoved.produce() + testEnv.CloseFollowDistance(&blockNum) + + // TODO: 
this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved + } + + // Step 6 Update Fee Recipient + { + validatorCtrl.EXPECT().UpdateFeeRecipient(gomock.Any(), gomock.Any()).Times(1) + + setFeeRecipient := NewSetFeeRecipientAddressInput(common) + setFeeRecipient.prepare([]*SetFeeRecipientAddressEventInput{ + {auth, &testAddrBob}, + }) + setFeeRecipient.produce() + testEnv.CloseFollowDistance(&blockNum) + + // Wait until the state is changed + time.Sleep(time.Millisecond * 300) + + recipientData, found, err := nodeStorage.GetRecipientData(nil, testAddrAlice) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, testAddrBob.String(), recipientData.FeeRecipient.String()) + } + + stopChan <- struct{}{} + }) +} diff --git a/eth/ethtest/operator_added_test.go b/eth/ethtest/operator_added_test.go new file mode 100644 index 0000000000..9a173a5064 --- /dev/null +++ b/eth/ethtest/operator_added_test.go @@ -0,0 +1,86 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/eventparser" +) + +type testOperatorAddedEventInput struct { + op *testOperator + auth *bind.TransactOpts +} + +type ProduceOperatorAddedEventsInput struct { + *CommonTestInput + events []*testOperatorAddedEventInput +} + +func NewOperatorAddedEventInput(common *CommonTestInput) *ProduceOperatorAddedEventsInput { + return &ProduceOperatorAddedEventsInput{common, nil} +} + +func (input *ProduceOperatorAddedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, event := range input.events { + err := event.validate() + if err != nil { + return err + } + } + return nil +} +func (input *testOperatorAddedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation 
error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.op == nil: + return fmt.Errorf("validation error: input.op is empty") + } + + return nil +} + +func (input *ProduceOperatorAddedEventsInput) prepare( + ops []*testOperator, + auth *bind.TransactOpts, +) { + input.events = make([]*testOperatorAddedEventInput, len(ops)) + + for i, op := range ops { + input.events[i] = &testOperatorAddedEventInput{op, auth} + } +} + +func (input *ProduceOperatorAddedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + op := event.op + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) + require.NoError(input.t, err) + _, err = input.boundContract.SimcontractTransactor.RegisterOperator(event.auth, packedOperatorPubKey, big.NewInt(100_000_000)) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/operator_removed_test.go b/eth/ethtest/operator_removed_test.go new file mode 100644 index 0000000000..5b4dd27822 --- /dev/null +++ b/eth/ethtest/operator_removed_test.go @@ -0,0 +1,83 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" +) + +type testOperatorRemovedEventInput struct { + opId uint64 + auth *bind.TransactOpts +} + +type ProduceOperatorRemovedEventsInput struct { + *CommonTestInput + events []*testOperatorRemovedEventInput +} + +func NewOperatorRemovedEventInput(common *CommonTestInput) *ProduceOperatorRemovedEventsInput { + return &ProduceOperatorRemovedEventsInput{common, nil} +} + +func (input *ProduceOperatorRemovedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if 
input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, event := range input.events { + err := event.validate() + if err != nil { + return err + } + } + return nil +} +func (input *testOperatorRemovedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.opId == 0: + return fmt.Errorf("validation error: input.opId is invalid") + } + + return nil +} + +func (input *ProduceOperatorRemovedEventsInput) prepare( + opsIds []uint64, + auth *bind.TransactOpts, +) { + input.events = make([]*testOperatorRemovedEventInput, len(opsIds)) + + for i, opId := range opsIds { + input.events[i] = &testOperatorRemovedEventInput{opId, auth} + } +} + +func (input *ProduceOperatorRemovedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + _, err = input.boundContract.SimcontractTransactor.RemoveOperator( + event.auth, + event.opId, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/set_fee_recipient_test.go b/eth/ethtest/set_fee_recipient_test.go new file mode 100644 index 0000000000..14ac7dd263 --- /dev/null +++ b/eth/ethtest/set_fee_recipient_test.go @@ -0,0 +1,80 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +type SetFeeRecipientAddressInput struct { + *CommonTestInput + events []*SetFeeRecipientAddressEventInput +} + +func NewSetFeeRecipientAddressInput(common *CommonTestInput) *SetFeeRecipientAddressInput { + return &SetFeeRecipientAddressInput{common, nil} +} + +func (input *SetFeeRecipientAddressInput) 
validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type SetFeeRecipientAddressEventInput struct { + auth *bind.TransactOpts + address *ethcommon.Address +} + +func (input *SetFeeRecipientAddressEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.address == nil: + return fmt.Errorf("validation error: input.address is empty") + } + + return nil +} + +func (input *SetFeeRecipientAddressInput) prepare( + eventsToDo []*SetFeeRecipientAddressEventInput, +) { + input.events = eventsToDo +} + +func (input *SetFeeRecipientAddressInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.SetFeeRecipientAddress( + event.auth, + *event.address, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/utils_test.go b/eth/ethtest/utils_test.go new file mode 100644 index 0000000000..289030f7c8 --- /dev/null +++ b/eth/ethtest/utils_test.go @@ -0,0 +1,300 @@ +package ethtest + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "errors" + "fmt" + "math/big" + "testing" + + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/golang/mock/gomock" + "github.com/herumi/bls-eth-go-binary/bls" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/ekm" 
+ "github.com/bloxapp/ssv/eth/contract" + "github.com/bloxapp/ssv/eth/eventhandler" + "github.com/bloxapp/ssv/eth/eventparser" + "github.com/bloxapp/ssv/eth/simulator" + ibftstorage "github.com/bloxapp/ssv/ibft/storage" + "github.com/bloxapp/ssv/networkconfig" + operatorstorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validator/mocks" + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + registrystorage "github.com/bloxapp/ssv/registry/storage" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils/blskeygen" + "github.com/bloxapp/ssv/utils/rsaencryption" + "github.com/bloxapp/ssv/utils/threshold" +) + +type testValidatorData struct { + masterKey *bls.SecretKey + masterPubKey *bls.PublicKey + masterPublicKeys bls.PublicKeys + operatorsShares []*testShare +} + +type testOperator struct { + id uint64 + rsaPub []byte + rsaPriv []byte +} + +type testShare struct { + opId uint64 + sec *bls.SecretKey + pub *bls.PublicKey +} + +func createNewValidator(ops []*testOperator) (*testValidatorData, error) { + validatorData := &testValidatorData{} + sharesCount := uint64(len(ops)) + threshold.Init() + + msk, mpk := blskeygen.GenBLSKeyPair() + secVec := msk.GetMasterSecretKey(int(sharesCount)) + pubKeys := bls.GetMasterPublicKey(secVec) + splitKeys, err := threshold.Create(msk.Serialize(), sharesCount-1, sharesCount) + if err != nil { + return nil, err + } + + validatorData.operatorsShares = make([]*testShare, sharesCount) + + // derive a `sharesCount` number of shares + for i := uint64(1); i <= sharesCount; i++ { + validatorData.operatorsShares[i-1] = &testShare{ + opId: i, + sec: splitKeys[i], + pub: splitKeys[i].GetPublicKey(), + } + } + + validatorData.masterKey = msk + validatorData.masterPubKey = mpk + validatorData.masterPublicKeys = pubKeys + + return validatorData, nil +} + +func createOperators(num uint64, idOffset uint64) 
([]*testOperator, error) { + testOps := make([]*testOperator, num) + + for i := uint64(1); i <= num; i++ { + pb, sk, err := rsaencryption.GenerateKeys() + if err != nil { + return nil, err + } + testOps[i-1] = &testOperator{ + id: idOffset + i, + rsaPub: pb, + rsaPriv: sk, + } + } + + return testOps, nil +} + +func generateSharesData(validatorData *testValidatorData, operators []*testOperator, owner ethcommon.Address, nonce int) ([]byte, error) { + var pubKeys []byte + var encryptedShares []byte + + for i, op := range operators { + rsaKey, err := rsaencryption.ConvertPemToPublicKey(op.rsaPub) + if err != nil { + return nil, fmt.Errorf("can't convert public key: %w", err) + } + + rawShare := validatorData.operatorsShares[i].sec.SerializeToHexStr() + cipherText, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(rawShare)) + if err != nil { + return nil, fmt.Errorf("can't encrypt share: %w", err) + } + + rsaPriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.rsaPriv)) + if err != nil { + return nil, fmt.Errorf("can't convert secret key to a private key share: %w", err) + } + + // check that we encrypt right + shareSecret := &bls.SecretKey{} + decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsaPriv, cipherText) + if err != nil { + return nil, err + } + if err = shareSecret.SetHexString(string(decryptedSharePrivateKey)); err != nil { + return nil, err + } + + pubKeys = append(pubKeys, validatorData.operatorsShares[i].pub.Serialize()...) + encryptedShares = append(encryptedShares, cipherText...) + + } + + toSign := fmt.Sprintf("%s:%d", owner.String(), nonce) + msgHash := crypto.Keccak256([]byte(toSign)) + signed := validatorData.masterKey.Sign(string(msgHash)) + sig := signed.Serialize() + + if !signed.VerifyByte(validatorData.masterPubKey, msgHash) { + return nil, errors.New("can't sign correctly") + } + + sharesData := append(pubKeys, encryptedShares...) + sharesDataSigned := append(sig, sharesData...) 
+ + return sharesDataSigned, nil +} + +func setupEventHandler( + t *testing.T, + ctx context.Context, + logger *zap.Logger, + operator *testOperator, + ownerAddress *ethcommon.Address, + useMockCtrl bool, +) (*eventhandler.EventHandler, *mocks.MockController, *gomock.Controller, operatorstorage.Storage, error) { + db, err := kv.NewInMemory(logger, basedb.Options{ + Ctx: ctx, + }) + if err != nil { + return nil, nil, nil, nil, err + } + + storageMap := ibftstorage.NewStores() + nodeStorage, operatorData := setupOperatorStorage(logger, db, operator, ownerAddress) + testNetworkConfig := networkconfig.TestNetwork + + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, testNetworkConfig, true, "") + if err != nil { + return nil, nil, nil, nil, err + } + + ctrl := gomock.NewController(t) + bc := beacon.NewMockBeaconNode(ctrl) + + contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) + if err != nil { + return nil, nil, nil, nil, err + } + + if useMockCtrl { + validatorCtrl := mocks.NewMockController(ctrl) + + parser := eventparser.New(contractFilterer) + + eh, err := eventhandler.New( + nodeStorage, + parser, + validatorCtrl, + testNetworkConfig.Domain, + validatorCtrl, + nodeStorage.GetPrivateKey, + keyManager, + bc, + storageMap, + eventhandler.WithFullNode(), + eventhandler.WithLogger(logger), + ) + + if err != nil { + return nil, nil, nil, nil, err + } + + validatorCtrl.EXPECT().GetOperatorData().Return(operatorData).AnyTimes() + + return eh, validatorCtrl, ctrl, nodeStorage, nil + } + + validatorCtrl := validator.NewController(logger, validator.ControllerOptions{ + Context: ctx, + DB: db, + RegistryStorage: nodeStorage, + KeyManager: keyManager, + StorageMap: storageMap, + OperatorData: operatorData, + }) + + parser := eventparser.New(contractFilterer) + + eh, err := eventhandler.New( + nodeStorage, + parser, + validatorCtrl, + testNetworkConfig.Domain, + validatorCtrl, + nodeStorage.GetPrivateKey, + keyManager, + bc, + storageMap, + 
eventhandler.WithFullNode(), + eventhandler.WithLogger(logger), + ) + if err != nil { + return nil, nil, nil, nil, err + } + + return eh, nil, ctrl, nodeStorage, nil +} + +func setupOperatorStorage( + logger *zap.Logger, + db basedb.Database, + operator *testOperator, + ownerAddress *ethcommon.Address, +) (operatorstorage.Storage, *registrystorage.OperatorData) { + if operator == nil { + logger.Fatal("empty test operator was passed") + } + + nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) + if err != nil { + logger.Fatal("failed to create node storage", zap.Error(err)) + } + + operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.rsaPriv)) + if err != nil { + logger.Fatal("couldn't setup operator private key", zap.Error(err)) + } + + _, found, err := nodeStorage.GetPrivateKey() + if err != nil || !found { + logger.Fatal("failed to get operator private key", zap.Error(err)) + } + var operatorData *registrystorage.OperatorData + operatorData, found, err = nodeStorage.GetOperatorDataByPubKey(nil, operatorPubKey) + + if err != nil { + logger.Fatal("couldn't get operator data by public key", zap.Error(err)) + } + if !found { + operatorData = ®istrystorage.OperatorData{ + PublicKey: operatorPubKey, + ID: operator.id, + OwnerAddress: *ownerAddress, + } + } + + return nodeStorage, operatorData +} + +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { + genesis := core.GenesisAlloc{} + + for _, testAddr := range testAddresses { + genesis[*testAddr] = core.GenesisAccount{Balance: big.NewInt(10000000000000000)} + } + + return simulator.NewSimulatedBackend( + genesis, 50_000_000, + ) +} diff --git a/eth/ethtest/validator_added_test.go b/eth/ethtest/validator_added_test.go new file mode 100644 index 0000000000..2497552e7f --- /dev/null +++ b/eth/ethtest/validator_added_test.go @@ -0,0 +1,134 @@ +package ethtest + +import ( + "fmt" + "math/big" + + 
"github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" + registrystorage "github.com/bloxapp/ssv/registry/storage" +) + +type testValidatorRegisteredInput struct { + *CommonTestInput + events []*validatorRegisteredEventInput +} + +func NewTestValidatorRegisteredInput(common *CommonTestInput) *testValidatorRegisteredInput { + return &testValidatorRegisteredInput{common, nil} +} + +func (input *testValidatorRegisteredInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type validatorRegisteredEventInput struct { + auth *bind.TransactOpts + ops []*testOperator + validator *testValidatorData + share []byte + opsIds []uint64 // separating opsIds from ops as it is a separate event field and should be used for destructive tests +} + +func (input *validatorRegisteredEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.validator == nil: + return fmt.Errorf("validation error: input.validator is empty") + case len(input.share) == 0: + return fmt.Errorf("validation error: input.share is empty") + case len(input.ops) == 0: + return fmt.Errorf("validation error: input.ops is empty") + } + + if len(input.opsIds) == 0 { + input.opsIds = make([]uint64, len(input.ops)) + for i, op := range input.ops { + input.opsIds[i] = op.id + } + } + + return nil +} + +func (input *testValidatorRegisteredInput) prepare( + validators []*testValidatorData, + shares [][]byte, + ops []*testOperator, + auth *bind.TransactOpts, + expectedNonce *registrystorage.Nonce, + 
validatorsIds []uint32, +) { + input.events = make([]*validatorRegisteredEventInput, len(validatorsIds)) + + for i, validatorId := range validatorsIds { + // Check there are no shares in the state for the current validator + valPubKey := validators[validatorId].masterPubKey.Serialize() + share := input.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(input.t, share) + + // Create event input + input.events[i] = &validatorRegisteredEventInput{ + validator: validators[validatorId], + share: shares[validatorId], + auth: auth, + ops: ops, + } + + // expect nonce bumping after each of these ValidatorAdded events handling + *expectedNonce++ + } +} + +func (input *testValidatorRegisteredInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + val := event.validator + valPubKey := val.masterPubKey.Serialize() + shares := input.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(input.t, shares) + + // Call the contract method + _, err := input.boundContract.SimcontractTransactor.RegisterValidator( + event.auth, + val.masterPubKey.Serialize(), + event.opsIds, + event.share, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/validator_removed_test.go b/eth/ethtest/validator_removed_test.go new file mode 100644 index 0000000000..778b67dff8 --- /dev/null +++ b/eth/ethtest/validator_removed_test.go @@ -0,0 +1,104 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testValidatorRemovedInput struct { + auth *bind.TransactOpts + validator 
*testValidatorData + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *testValidatorRemovedInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.validator == nil: + return fmt.Errorf("validation error: input.validator is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +type TestValidatorRemovedEventsInput struct { + *CommonTestInput + events []*testValidatorRemovedInput +} + +func (input *TestValidatorRemovedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: empty CommonTestInput") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +func NewTestValidatorRemovedEventsInput(common *CommonTestInput) *TestValidatorRemovedEventsInput { + return &TestValidatorRemovedEventsInput{common, nil} +} + +func (input *TestValidatorRemovedEventsInput) prepare( + validators []*testValidatorData, + validatorsIds []uint64, + opsIds []uint64, + auth *bind.TransactOpts, + cluster *simcontract.CallableCluster, +) { + input.events = make([]*testValidatorRemovedInput, len(validatorsIds)) + + for i, validatorId := range validatorsIds { + input.events[i] = &testValidatorRemovedInput{ + auth, + validators[validatorId], + opsIds, + cluster, + } + } +} + +func (input *TestValidatorRemovedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + valPubKey := event.validator.masterPubKey.Serialize() + // Check the validator's shares are present in the state before removing + valShare := input.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(input.t, 
valShare) + + _, err = input.boundContract.SimcontractTransactor.RemoveValidator( + event.auth, + valPubKey, + event.opsIds, + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/eventhandler/event_handler.go b/eth/eventhandler/event_handler.go index 1c909caf88..b207c78a25 100644 --- a/eth/eventhandler/event_handler.go +++ b/eth/eventhandler/event_handler.go @@ -46,7 +46,7 @@ var ( type taskExecutor interface { StartValidator(share *ssvtypes.SSVShare) error - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient ethcommon.Address) error @@ -285,7 +285,7 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, return nil, nil } - sharePK, err := eh.handleValidatorRemoved(txn, validatorRemovedEvent) + validatorPubKey, err := eh.handleValidatorRemoved(txn, validatorRemovedEvent) if err != nil { eh.metrics.EventProcessingFailed(abiEvent.Name) @@ -298,13 +298,11 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, defer eh.metrics.EventProcessed(abiEvent.Name) - if sharePK == nil { - return nil, nil + if validatorPubKey != nil { + return NewStopValidatorTask(eh.taskExecutor, validatorPubKey), nil } - task := NewStopValidatorTask(eh.taskExecutor, validatorRemovedEvent.PublicKey) - - return task, nil + return nil, nil case ClusterLiquidated: clusterLiquidatedEvent, err := eh.eventParser.ParseClusterLiquidated(event) diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index bf1f96961e..070de44d04 100644 --- 
a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -1,6 +1,7 @@ package eventhandler import ( + "bytes" "context" "crypto/rand" "crypto/rsa" @@ -12,13 +13,8 @@ import ( "strings" "testing" - "github.com/bloxapp/ssv/operator/validator" - "github.com/bloxapp/ssv/operator/validator/mocks" - "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/bloxapp/ssv/utils/blskeygen" - "github.com/pkg/errors" - + ekmcore "github.com/bloxapp/eth2-key-manager/core" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" ethcommon "github.com/ethereum/go-ethereum/common" @@ -27,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/golang/mock/gomock" "github.com/herumi/bls-eth-go-binary/bls" + "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -40,10 +37,15 @@ import ( ibftstorage "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/networkconfig" operatorstorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validator/mocks" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" registrystorage "github.com/bloxapp/ssv/registry/storage" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils" + "github.com/bloxapp/ssv/utils/blskeygen" "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/utils/threshold" ) @@ -60,15 +62,34 @@ func TestHandleBlockEventsStream(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + operatorsCount := uint64(0) // Create operators rsa keys - ops, err := createOperators(4) + ops, err := createOperators(4, operatorsCount) require.NoError(t, err) + operatorsCount += uint64(len(ops)) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + 
currentSlot := &utils.SlotValue{} + mockBeaconNetwork := utils.SetupMockBeaconNetwork(t, currentSlot) + mockNetworkConfig := &networkconfig.NetworkConfig{ + Beacon: mockBeaconNetwork, + } + + eh, _, err := setupEventHandler(t, ctx, logger, mockNetworkConfig, ops[0], false) if err != nil { t.Fatal(err) } - sim := simTestBackend(testAddr) + + // Just creating one more key -> address for testing + wrongPk, err := crypto.HexToECDSA("42e14d227125f411d6d3285bb4a2e07c2dba2e210bd2f3f4e2a36633bd61bfe6") + require.NoError(t, err) + testAddr2 := crypto.PubkeyToAddress(wrongPk.PublicKey) + + testAddresses := make([]*ethcommon.Address, 2) + testAddresses[0] = &testAddr + testAddresses[1] = &testAddr2 + + // Adding testAddresses to the genesis block mostly to specify some balances for them + sim := simTestBackend(testAddresses) // Create JSON-RPC handler rpcServer, _ := sim.Node.RPCHandler() @@ -114,13 +135,23 @@ func TestHandleBlockEventsStream(t *testing.T) { sharesData1, err := generateSharesData(validatorData1, ops, testAddr, 0) require.NoError(t, err) + // Create another validator. 
We'll create the shares later in the tests + validatorData2, err := createNewValidator(ops) + require.NoError(t, err) + + validatorData3, err := createNewValidator(ops) + require.NoError(t, err) + sharesData3, err := generateSharesData(validatorData3, ops, testAddr, 3) + require.NoError(t, err) + blockNum := uint64(0x1) + currentSlot.SetSlot(100) t.Run("test OperatorAdded event handle", func(t *testing.T) { for _, op := range ops { // Call the contract method - packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.pub) + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) require.NoError(t, err) _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000)) require.NoError(t, err) @@ -139,22 +170,22 @@ func TestHandleBlockEventsStream(t *testing.T) { }() // Check that there is no registered operators - operators, err := eh.nodeStorage.ListOperators(nil, 0, 10) + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, 0, len(operators)) - // Hanlde the event + // Handle the event lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) blockNum++ - // Check storage for a new operator - operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) + // Check storage for the new operators + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) require.NoError(t, err) require.Equal(t, len(ops), len(operators)) - // Check if an operator in the storage has same attributes + // Check if operators in the storage have same attributes for i, log := range block.Logs { operatorAddedEvent, err := contractFilterer.ParseOperatorAdded(log) require.NoError(t, err) @@ -162,47 +193,124 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) require.Equal(t, operatorAddedEvent.OperatorId, data.ID) require.Equal(t, operatorAddedEvent.Owner, 
data.OwnerAddress) - require.Equal(t, ops[i].pub, data.PublicKey) + require.Equal(t, ops[i].rsaPub, data.PublicKey) } }) - // Receive event, unmarshall, parse, check parse event is not nil or with error, operator id is correct t.Run("test OperatorRemoved event handle", func(t *testing.T) { - // Call the contract method - _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 1) - require.NoError(t, err) - sim.Commit() - block := <-logs - require.NotEmpty(t, block.Logs) - require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0]) + // Should return MalformedEventError and no changes to the state + t.Run("test OperatorRemoved incorrect operator ID", func(t *testing.T) { + // Call the contract method + _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 100500) + require.NoError(t, err) + sim.Commit() - eventsCh := make(chan executionclient.BlockLogs) - go func() { - defer close(eventsCh) - eventsCh <- block - }() + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0]) - // Check that there is 1 registered operator - operators, err := eh.nodeStorage.ListOperators(nil, 0, 10) - require.NoError(t, err) - require.Equal(t, len(ops), len(operators)) + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() - // Hanlde the event - lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) - require.NoError(t, err) - blockNum++ + // Check that there is 1 registered operator + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) - // Check if the operator was removed successfuly - // TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is 
resolved - operators, err = eh.nodeStorage.ListOperators(nil, 0, 10) - require.NoError(t, err) - require.Equal(t, len(ops), len(operators)) + // Handle the event + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check if the operator wasn't removed successfully + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + }) + + // Receive event, unmarshall, parse, check parse event is not nil or with error, operator id is correct + // TODO: fix this test. It checks nothing, due the handleOperatorRemoved method is no-op currently + t.Run("test OperatorRemoved happy flow", func(t *testing.T) { + // Prepare a new operator to remove it later in this test + op, err := createOperators(1, operatorsCount) + require.NoError(t, err) + operatorsCount++ + + // Call the contract method + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op[0].rsaPub) + require.NoError(t, err) + _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000)) + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f4"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + // Check that there is no registered operators + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops), len(operators)) + + // Handle OperatorAdded event + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + // Check storage for the new operator + operators, err = 
eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops)+1, len(operators)) + + // Now start the OperatorRemoved event handling + // Call the contract method + _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 4) + require.NoError(t, err) + sim.Commit() + + block = <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0]) + + eventsCh = make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, len(ops)+1, len(operators)) + + // Handle OperatorRemoved event + lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved + // Check if the operator was removed successfully + //operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) + //require.NoError(t, err) + //require.Equal(t, len(ops), len(operators)) + }) }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, // public key is correct, owner is correct, operator ids are correct, shares are correct + // slashing protection data is correct t.Run("test ValidatorAdded event handle", func(t *testing.T) { nonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) require.NoError(t, err) @@ -236,9 +344,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + + requireKeyManagerDataToExist(t, eh, 1, validatorData1) + // Check that validator was registered shares := 
eh.nodeStorage.Shares().List(nil) require.Equal(t, 1, len(shares)) @@ -247,12 +358,11 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) require.Equal(t, registrystorage.Nonce(1), nonce) - validatorData2, err := createNewValidator(ops) - require.NoError(t, err) sharesData2, err := generateSharesData(validatorData2, ops, testAddr, 2) require.NoError(t, err) // SharesData length is incorrect. Nonce is bumped; Validator wasn't added + // slashing protection data is not added t.Run("test nonce bumping even for incorrect sharesData length", func(t *testing.T) { // changing the length malformedSharesData := sharesData2[:len(sharesData2)-1] @@ -285,10 +395,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToNotExist(t, eh, 1, validatorData2) + // Check that validator was not registered, shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 1, len(shares)) @@ -299,6 +411,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Length of the shares []byte is correct; nonce is bumped; validator is added + // slashing protection data is correct t.Run("test validator 1 doesnt check validator's 4 share", func(t *testing.T) { malformedSharesData := sharesData2[:] // Corrupt the encrypted last share key of the 4th operator @@ -332,10 +445,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToExist(t, eh, 2, validatorData2) + // Check that validator was registered for op1, shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 2, len(shares)) @@ -345,13 +460,9 @@ func 
TestHandleBlockEventsStream(t *testing.T) { require.Equal(t, registrystorage.Nonce(3), nonce) }) - validatorData3, err := createNewValidator(ops) - require.NoError(t, err) - sharesData3, err := generateSharesData(validatorData3, ops, testAddr, 3) - require.NoError(t, err) - // Share for 1st operator is malformed; check nonce is bumped correctly; validator wasn't added - t.Run("test correct ValidatorAdded again and nonce is bumped", func(t *testing.T) { + // slashing protection data is not added + t.Run("test malformed ValidatorAdded and nonce is bumped", func(t *testing.T) { malformedSharesData := sharesData3[:] operatorCount := len(ops) @@ -389,10 +500,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToNotExist(t, eh, 2, validatorData3) + // Check that validator was not registered shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 2, len(shares)) @@ -403,6 +516,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Correct event; check nonce is bumped correctly; validator is added + // slashing protection data is correct t.Run("test correct ValidatorAdded again and nonce is bumped", func(t *testing.T) { // regenerate with updated nonce sharesData3, err = generateSharesData(validatorData3, ops, testAddr, 4) @@ -435,10 +549,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToExist(t, eh, 3, validatorData3) + // Check that validator was registered shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 3, len(shares)) @@ -447,14 +563,192 @@ func TestHandleBlockEventsStream(t 
*testing.T) { require.NoError(t, err) require.Equal(t, registrystorage.Nonce(5), nonce) }) + + t.Run("test correct ValidatorAdded again and nonce is bumped with another owner", func(t *testing.T) { + validatorData4, err := createNewValidator(ops) + require.NoError(t, err) + authTestAddr2, _ := bind.NewKeyedTransactorWithChainID(wrongPk, big.NewInt(1337)) + + sharesData4, err := generateSharesData(validatorData4, ops, testAddr2, 0) + require.NoError(t, err) + // Call the contract method + _, err = boundContract.SimcontractTransactor.RegisterValidator( + authTestAddr2, + validatorData4.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + sharesData4, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block = <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e5"), block.Logs[0].Topics[0]) + + eventsCh = make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) + require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) + blockNum++ + + requireKeyManagerDataToExist(t, eh, 4, validatorData4) + + // Check that validator was registered + shares = eh.nodeStorage.Shares().List(nil) + require.Equal(t, 4, len(shares)) + // and nonce was bumped + nonce, err = eh.nodeStorage.GetNextNonce(nil, testAddr2) + require.NoError(t, err) + // Check that nonces are not intertwined between different owner accounts! 
+ require.Equal(t, registrystorage.Nonce(1), nonce) + }) + }) - // Receive event, unmarshall, parse, check parse event is not nil or with an error, - // public key is correct, owner is correct, operator ids are correct - t.Run("test ValidatorRemoved event handle", func(t *testing.T) { - _, err = boundContract.SimcontractTransactor.RemoveValidator( + t.Run("test ValidatorRemoved event handling", func(t *testing.T) { + // Must throw error "malformed event: could not find validator share" + t.Run("ValidatorRemoved incorrect event public key", func(t *testing.T) { + pk := validatorData1.masterPubKey.Serialize() + // Corrupt the public key + pk[len(pk)-1] ^= 1 + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + pk, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator's shares are still present in the state after incorrect ValidatorRemoved event + valShare := eh.nodeStorage.Shares().Get(nil, validatorData1.masterPubKey.Serialize()) + require.NotNil(t, valShare) + }) + + t.Run("ValidatorRemoved incorrect owner address", func(t *testing.T) { + wrongAuth, _ := bind.NewKeyedTransactorWithChainID(wrongPk, big.NewInt(1337)) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + wrongAuth, + validatorData1.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + 
ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator's shares are still present in the state after incorrect ValidatorRemoved event + valShare := eh.nodeStorage.Shares().Get(nil, validatorData1.masterPubKey.Serialize()) + require.NotNil(t, valShare) + }) + + // Receive event, unmarshall, parse, check parse event is not nil or with an error, + // public key is correct, owner is correct, operator ids are correct + // event handler's own operator is responsible for removed validator + t.Run("ValidatorRemoved happy flow", func(t *testing.T) { + valPubKey := validatorData1.masterPubKey.Serialize() + // Check the validator's shares are present in the state before removing + valShare := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, valShare) + requireKeyManagerDataToExist(t, eh, 4, validatorData1) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + validatorData1.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + 
defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator was removed from the validator shares storage. + shares := eh.nodeStorage.Shares().List(nil) + require.Equal(t, 3, len(shares)) + valShare = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + requireKeyManagerDataToNotExist(t, eh, 3, validatorData1) + }) + }) + + // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct + // slashing protection data is not deleted + t.Run("test ClusterLiquidated event handle", func(t *testing.T) { + _, err = boundContract.SimcontractTransactor.Liquidate( auth, - validatorData1.masterPubKey.Serialize(), + testAddr, []uint64{1, 2, 3, 4}, simcontract.CallableCluster{ ValidatorCount: 1, @@ -468,7 +762,7 @@ func TestHandleBlockEventsStream(t *testing.T) { block := <-logs require.NotEmpty(t, block.Logs) - require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0x1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e688"), block.Logs[0].Topics[0]) eventsCh := make(chan executionclient.BlockLogs) go func() { @@ -476,14 +770,91 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests. 
This one has to be in the state + valPubKey := validatorData2.masterPubKey.Serialize() + + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.True(t, share.Liquidated) + // check that slashing data was not deleted + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + + require.Equal(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Equal(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, highestProposal, currentSlot.GetSlot()) }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct - t.Run("test ClusterLiquidated event handle", func(t *testing.T) { + // ** storedEpoch = max(nextEpoch, storedEpoch) ** + // Validate that slashing protection data stored epoch is nextEpoch and NOT storedEpoch + t.Run("test ClusterReactivated event handle", func(t *testing.T) { + _, err = boundContract.SimcontractTransactor.Reactivate( + auth, + []uint64{1, 2, 3, 4}, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs 
+ require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b6859"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + currentSlot.SetSlot(1000) + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + + // check that slashing data was bumped + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + require.Equal(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Equal(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, highestProposal, currentSlot.GetSlot()) + + blockNum++ + }) + + // Liquidated event is far in the future + // in order to simulate stored far in the future slashing protection data + t.Run("test ClusterLiquidated event handle - far in the future", func(t *testing.T) { _, err = boundContract.SimcontractTransactor.Liquidate( auth, testAddr, @@ -514,11 +885,13 @@ func TestHandleBlockEventsStream(t *testing.T) { blockNum++ }) - // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct - t.Run("test ClusterReactivated event handle", func(t *testing.T) { + // Reactivate event + // ** storedEpoch = max(nextEpoch, storedEpoch) ** + // Validate that slashing protection data stored epoch is storedEpoch and NOT nextEpoch + 
t.Run("test ClusterReactivated event handle - far in the future", func(t *testing.T) { _, err = boundContract.SimcontractTransactor.Reactivate( auth, - []uint64{1, 2, 3}, + []uint64{1, 2, 3, 4}, big.NewInt(100_000_000), simcontract.CallableCluster{ ValidatorCount: 1, @@ -540,17 +913,44 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests + valPubKey := validatorData2.masterPubKey.Serialize() + + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.True(t, share.Liquidated) + currentSlot.SetSlot(100) + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + + // check that slashing data is greater than current epoch + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + require.Greater(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Greater(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Greater(t, highestProposal, currentSlot.GetSlot()) + blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, fee recipient is correct t.Run("test FeeRecipientAddressUpdated event handle", func(t *testing.T) { _, err = 
boundContract.SimcontractTransactor.SetFeeRecipientAddress( auth, - ethcommon.HexToAddress("0x1"), + testAddr2, ) require.NoError(t, err) sim.Commit() @@ -569,14 +969,202 @@ func TestHandleBlockEventsStream(t *testing.T) { require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) blockNum++ - // Check if the fee recepient was updated - recepientData, _, err := eh.nodeStorage.GetRecipientData(nil, testAddr) + // Check if the fee recipient was updated + recipientData, _, err := eh.nodeStorage.GetRecipientData(nil, testAddr) require.NoError(t, err) - require.Equal(t, ethcommon.HexToAddress("0x1").String(), recepientData.FeeRecipient.String()) + require.Equal(t, testAddr2.String(), recipientData.FeeRecipient.String()) + }) + + // DO / UNDO in one block tests + t.Run("test DO / UNDO in one block", func(t *testing.T) { + t.Run("test OperatorAdded + OperatorRemoved events handling", func(t *testing.T) { + // There are 5 ops before the test running + // Check that there is no registered operators + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, operatorsCount, uint64(len(operators))) + + tmpOps, err := createOperators(1, operatorsCount) + require.NoError(t, err) + operatorsCount++ + op := tmpOps[0] + + // Call the RegisterOperator contract method + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) + require.NoError(t, err) + _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000)) + require.NoError(t, err) + + // Call the OperatorRemoved contract method + _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, op.id) + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f4"), block.Logs[0].Topics[0]) + require.Equal(t, 
ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + // Handle the event + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // #TODO: Fails until we fix the OperatorAdded: handlers.go #108 + // Check storage for the new operators + //operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) + //require.NoError(t, err) + //require.Equal(t, operatorsCount-1, uint64(len(operators))) + // + //_, found, err := eh.nodeStorage.GetOperatorData(nil, op.id) + //require.NoError(t, err) + //require.False(t, found) + }) + + t.Run("test ValidatorAdded + ValidatorRemoved events handling", func(t *testing.T) { + shares := eh.nodeStorage.Shares().List(nil) + sharesCountBeforeTest := len(shares) + + validatorData4, err := createNewValidator(ops) + require.NoError(t, err) + + currentNonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) + require.NoError(t, err) + + sharesData4, err := generateSharesData(validatorData4, ops, testAddr, int(currentNonce)) + require.NoError(t, err) + + valPubKey := validatorData4.masterPubKey.Serialize() + valShare := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + + // Call the contract method + _, err = boundContract.SimcontractTransactor.RegisterValidator( + auth, + validatorData4.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + sharesData4, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + valPubKey, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, 
+ Active: true, + Balance: big.NewInt(100_000_000), + }) + + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e5"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + valShare = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + + // Check that validator was registered + shares = eh.nodeStorage.Shares().List(nil) + require.Equal(t, sharesCountBeforeTest, len(shares)) + // and nonce was bumped + nonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) + require.NoError(t, err) + require.Equal(t, currentNonce+1, nonce) + }) + + t.Run("test ClusterLiquidated + ClusterReactivated events handling", func(t *testing.T) { + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests + valPubKey := validatorData2.masterPubKey.Serialize() + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + + require.NotNil(t, share) + require.False(t, share.Liquidated) + _, err = boundContract.SimcontractTransactor.Liquidate( + auth, + testAddr, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + _, err = boundContract.SimcontractTransactor.Reactivate( + auth, + []uint64{1, 2, 3, 4}, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + 
require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e688"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0xc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b6859"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) + }) }) } -func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, operator *testOperator, useMockCtrl bool) (*EventHandler, *mocks.MockController, error) { +func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, network *networkconfig.NetworkConfig, operator *testOperator, useMockCtrl bool) (*EventHandler, *mocks.MockController, error) { db, err := kv.NewInMemory(logger, basedb.Options{ Ctx: ctx, }) @@ -584,9 +1172,14 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op storageMap := ibftstorage.NewStores() nodeStorage, operatorData := setupOperatorStorage(logger, db, operator) - testNetworkConfig := networkconfig.TestNetwork - keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, testNetworkConfig, true, "") + if network == nil { + network = &networkconfig.NetworkConfig{ + Beacon: utils.SetupMockBeaconNetwork(t, &utils.SlotValue{}), + } + } + + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, *network, true, "") if err != nil { return nil, nil, err } @@ -607,7 +1200,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - testNetworkConfig.Domain, + 
network.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -633,6 +1226,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op KeyManager: keyManager, StorageMap: storageMap, OperatorData: operatorData, + ValidatorsMap: validatorsmap.New(ctx), }) contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) @@ -644,7 +1238,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - testNetworkConfig.Domain, + network.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -660,7 +1254,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *testOperator) (operatorstorage.Storage, *registrystorage.OperatorData) { if operator == nil { - logger.Fatal("empty test operator was passed", zap.Error(fmt.Errorf("empty test operator was passed"))) + logger.Fatal("empty test operator was passed") } nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) @@ -668,9 +1262,9 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *test logger.Fatal("failed to create node storage", zap.Error(err)) } - operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.priv)) + operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.rsaPriv)) if err != nil { - logger.Fatal("could not setup operator private key", zap.Error(err)) + logger.Fatal("couldn't setup operator private key", zap.Error(err)) } _, found, err := nodeStorage.GetPrivateKey() @@ -681,7 +1275,7 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *test operatorData, found, err = nodeStorage.GetOperatorDataByPubKey(nil, operatorPubKey) if err != nil { - logger.Fatal("could not get operator data by public key", zap.Error(err)) + logger.Fatal("couldn't get operator 
data by public key", zap.Error(err)) } if !found { operatorData = ®istrystorage.OperatorData{ @@ -704,20 +1298,22 @@ func unmarshalLog(t *testing.T, rawOperatorAdded string) ethtypes.Log { return vLogOperatorAdded } -func simTestBackend(testAddr ethcommon.Address) *simulator.SimulatedBackend { +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { + genesis := core.GenesisAlloc{} + + for _, testAddr := range testAddresses { + genesis[*testAddr] = core.GenesisAccount{Balance: big.NewInt(10000000000000000)} + } + return simulator.NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000000000)}, - }, 10000000, + genesis, 50_000_000, ) } func TestCreatingSharesData(t *testing.T) { - owner := testAddr nonce := 0 - // - ops, err := createOperators(4) + ops, err := createOperators(4, 1) require.NoError(t, err) validatorData, err := createNewValidator(ops) @@ -742,7 +1338,7 @@ func TestCreatingSharesData(t *testing.T) { encryptedKeys := splitBytes(sharesData[pubKeysOffset:], len(sharesData[pubKeysOffset:])/operatorCount) for i, enck := range encryptedKeys { - priv, err := rsaencryption.ConvertPemToPrivateKey(string(ops[i].priv)) + priv, err := rsaencryption.ConvertPemToPrivateKey(string(ops[i].rsaPriv)) require.NoError(t, err) decryptedSharePrivateKey, err := rsaencryption.DecodeKey(priv, enck) require.NoError(t, err) @@ -763,9 +1359,9 @@ type testValidatorData struct { } type testOperator struct { - id uint64 - pub []byte // rsa pub - priv []byte // rsa sk + id uint64 + rsaPub []byte + rsaPriv []byte } type testShare struct { @@ -774,24 +1370,32 @@ type testShare struct { pub *bls.PublicKey } +func shareExist(accounts []ekmcore.ValidatorAccount, sharePubKey []byte) bool { + for _, acc := range accounts { + if bytes.Equal(acc.ValidatorPublicKey(), sharePubKey) { + return true + } + } + return false +} + func createNewValidator(ops []*testOperator) (*testValidatorData, error) { validatorData := 
&testValidatorData{} sharesCount := uint64(len(ops)) threshold.Init() - msk, pubk := blskeygen.GenBLSKeyPair() + msk, mpk := blskeygen.GenBLSKeyPair() secVec := msk.GetMasterSecretKey(int(sharesCount)) - pubks := bls.GetMasterPublicKey(secVec) + pubKeys := bls.GetMasterPublicKey(secVec) splitKeys, err := threshold.Create(msk.Serialize(), sharesCount-1, sharesCount) if err != nil { return nil, err } - num := uint64(len(ops)) - validatorData.operatorsShares = make([]*testShare, num) + validatorData.operatorsShares = make([]*testShare, sharesCount) - // derive a `hareCount` number of shares - for i := uint64(1); i <= num; i++ { + // derive a `sharesCount` number of shares + for i := uint64(1); i <= sharesCount; i++ { validatorData.operatorsShares[i-1] = &testShare{ opId: i, sec: splitKeys[i], @@ -800,54 +1404,54 @@ func createNewValidator(ops []*testOperator) (*testValidatorData, error) { } validatorData.masterKey = msk - validatorData.masterPubKey = pubk - validatorData.masterPublicKeys = pubks + validatorData.masterPubKey = mpk + validatorData.masterPublicKeys = pubKeys return validatorData, nil } -func createOperators(num uint64) ([]*testOperator, error) { - testops := make([]*testOperator, num) +func createOperators(num uint64, idOffset uint64) ([]*testOperator, error) { + testOps := make([]*testOperator, num) for i := uint64(1); i <= num; i++ { pb, sk, err := rsaencryption.GenerateKeys() if err != nil { return nil, err } - testops[i-1] = &testOperator{ - id: i, - pub: pb, - priv: sk, + testOps[i-1] = &testOperator{ + id: idOffset + i, + rsaPub: pb, + rsaPriv: sk, } } - return testops, nil + return testOps, nil } func generateSharesData(validatorData *testValidatorData, operators []*testOperator, owner ethcommon.Address, nonce int) ([]byte, error) { - var pubkeys []byte + var pubKeys []byte var encryptedShares []byte for i, op := range operators { - rsakey, err := rsaencryption.ConvertPemToPublicKey(op.pub) + rsaKey, err := 
rsaencryption.ConvertPemToPublicKey(op.rsaPub) if err != nil { - return nil, fmt.Errorf("cant convert publickey: %w", err) + return nil, fmt.Errorf("can't convert public key: %w", err) } - rawshare := validatorData.operatorsShares[i].sec.SerializeToHexStr() - ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, rsakey, []byte(rawshare)) + rawShare := validatorData.operatorsShares[i].sec.SerializeToHexStr() + cipherText, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(rawShare)) if err != nil { - return nil, errors.New("cant encrypt share") + return nil, fmt.Errorf("can't encrypt share: %w", err) } - rsapriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.priv)) + rsaPriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.rsaPriv)) if err != nil { - return nil, err + return nil, fmt.Errorf("can't convert secret key to a private key share: %w", err) } // check that we encrypt right shareSecret := &bls.SecretKey{} - decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsapriv, ciphertext) + decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsaPriv, cipherText) if err != nil { return nil, err } @@ -855,22 +1459,56 @@ func generateSharesData(validatorData *testValidatorData, operators []*testOpera return nil, err } - pubkeys = append(pubkeys, validatorData.operatorsShares[i].pub.Serialize()...) - encryptedShares = append(encryptedShares, ciphertext...) + pubKeys = append(pubKeys, validatorData.operatorsShares[i].pub.Serialize()...) + encryptedShares = append(encryptedShares, cipherText...) 
} - tosign := fmt.Sprintf("%s:%d", owner.String(), nonce) - msghash := crypto.Keccak256([]byte(tosign)) - signed := validatorData.masterKey.Sign(string(msghash)) + toSign := fmt.Sprintf("%s:%d", owner.String(), nonce) + msgHash := crypto.Keccak256([]byte(toSign)) + signed := validatorData.masterKey.Sign(string(msgHash)) sig := signed.Serialize() - if !signed.VerifyByte(validatorData.masterPubKey, msghash) { - return nil, errors.New("couldn't sign correctly") + if !signed.VerifyByte(validatorData.masterPubKey, msgHash) { + return nil, errors.New("can't sign correctly") } - sharesData := append(pubkeys, encryptedShares...) + sharesData := append(pubKeys, encryptedShares...) sharesDataSigned := append(sig, sharesData...) return sharesDataSigned, nil } + +func requireKeyManagerDataToExist(t *testing.T, eh *EventHandler, expectedAccounts int, validatorData *testValidatorData) { + sharePubKey := validatorData.operatorsShares[0].sec.GetPublicKey().Serialize() + accounts, err := eh.keyManager.(ekm.StorageProvider).ListAccounts() + require.NoError(t, err) + require.Equal(t, expectedAccounts, len(accounts)) + require.True(t, shareExist(accounts, sharePubKey)) + + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + + _, found, err = eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) +} + +func requireKeyManagerDataToNotExist(t *testing.T, eh *EventHandler, expectedAccounts int, validatorData *testValidatorData) { + sharePubKey := validatorData.operatorsShares[0].sec.GetPublicKey().Serialize() + accounts, err := eh.keyManager.(ekm.StorageProvider).ListAccounts() + require.NoError(t, err) + require.Equal(t, expectedAccounts, len(accounts)) + require.False(t, shareExist(accounts, sharePubKey)) + + highestAttestation, found, err := 
eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, highestAttestation) + + _, found, err = eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.False(t, found) +} diff --git a/eth/eventhandler/handlers.go b/eth/eventhandler/handlers.go index 7c25d7e6f4..d4632ddf6f 100644 --- a/eth/eventhandler/handlers.go +++ b/eth/eventhandler/handlers.go @@ -12,6 +12,7 @@ import ( "github.com/herumi/bls-eth-go-binary/bls" "go.uber.org/zap" + "github.com/bloxapp/ssv/ekm" "github.com/bloxapp/ssv/eth/contract" "github.com/bloxapp/ssv/logging/fields" qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" @@ -39,10 +40,10 @@ var ( func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.ContractOperatorAdded) error { logger := eh.logger.With( - zap.String("event_type", OperatorAdded), + fields.EventName(OperatorAdded), fields.TxHash(event.Raw.TxHash), fields.OperatorID(event.OperatorId), - zap.String("owner_address", event.Owner.String()), + fields.Owner(event.Owner), fields.OperatorPubKey(event.PublicKey), ) logger.Debug("processing event") @@ -85,7 +86,7 @@ func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.Cont func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.ContractOperatorRemoved) error { logger := eh.logger.With( - zap.String("event_type", OperatorRemoved), + fields.EventName(OperatorRemoved), fields.TxHash(event.Raw.TxHash), fields.OperatorID(event.OperatorId), ) @@ -101,8 +102,8 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co } logger = logger.With( - zap.String("operator_pub_key", ethcommon.Bytes2Hex(od.PublicKey)), - zap.String("owner_address", od.OwnerAddress.String()), + fields.OperatorPubKey(od.PublicKey), + fields.Owner(od.OwnerAddress), ) // TODO: In original handler we didn't delete operator data, so this 
behavior was preserved. However we likely need to. @@ -124,10 +125,10 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co func (eh *EventHandler) handleValidatorAdded(txn basedb.Txn, event *contract.ContractValidatorAdded) (ownShare *ssvtypes.SSVShare, err error) { logger := eh.logger.With( - zap.String("event_type", ValidatorAdded), + fields.EventName(ValidatorAdded), fields.TxHash(event.Raw.TxHash), fields.Owner(event.Owner), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.OperatorIDs(event.OperatorIds), fields.Validator(event.PublicKey), ) @@ -324,12 +325,12 @@ func validatorAddedEventToShare( return &validatorShare, shareSecret, nil } -func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.ContractValidatorRemoved) ([]byte, error) { +func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.ContractValidatorRemoved) (spectypes.ValidatorPK, error) { logger := eh.logger.With( - zap.String("event_type", ValidatorRemoved), + fields.EventName(ValidatorRemoved), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), fields.PubKey(event.PublicKey), ) logger.Debug("processing event") @@ -372,6 +373,11 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C logger = logger.With(zap.String("validator_pubkey", hex.EncodeToString(share.ValidatorPubKey))) } if isOperatorShare { + err = eh.keyManager.RemoveShare(hex.EncodeToString(share.SharePubKey)) + if err != nil { + return nil, fmt.Errorf("could not remove share from ekm storage: %w", err) + } + eh.metrics.ValidatorRemoved(event.PublicKey) logger.Debug("processed event") return share.ValidatorPubKey, nil @@ -383,10 +389,10 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C func (eh *EventHandler) handleClusterLiquidated(txn 
basedb.Txn, event *contract.ContractClusterLiquidated) ([]*ssvtypes.SSVShare, error) { logger := eh.logger.With( - zap.String("event_type", ClusterLiquidated), + fields.EventName(ClusterLiquidated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), ) logger.Debug("processing event") @@ -405,10 +411,10 @@ func (eh *EventHandler) handleClusterLiquidated(txn basedb.Txn, event *contract. func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract.ContractClusterReactivated) ([]*ssvtypes.SSVShare, error) { logger := eh.logger.With( - zap.String("event_type", ClusterReactivated), + fields.EventName(ClusterReactivated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), ) logger.Debug("processing event") @@ -417,6 +423,13 @@ func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract return nil, fmt.Errorf("could not process cluster event: %w", err) } + // bump slashing protection for operator reactivated validators + for _, share := range toReactivate { + if err := eh.keyManager.(ekm.StorageProvider).BumpSlashingProtection(share.SharePubKey); err != nil { + return nil, fmt.Errorf("could not bump slashing protection: %w", err) + } + } + if len(enabledPubKeys) > 0 { logger = logger.With(zap.Strings("enabled_validators", enabledPubKeys)) } @@ -427,9 +440,9 @@ func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract func (eh *EventHandler) handleFeeRecipientAddressUpdated(txn basedb.Txn, event *contract.ContractFeeRecipientAddressUpdated) (bool, error) { logger := eh.logger.With( - zap.String("event_type", FeeRecipientAddressUpdated), + fields.EventName(FeeRecipientAddressUpdated), 
fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), + fields.Owner(event.Owner), fields.FeeRecipient(event.RecipientAddress.Bytes()), ) logger.Debug("processing event") diff --git a/eth/eventhandler/local_events_test.go b/eth/eventhandler/local_events_test.go index 7697c79363..fda1ae0080 100644 --- a/eth/eventhandler/local_events_test.go +++ b/eth/eventhandler/local_events_test.go @@ -18,7 +18,7 @@ import ( func TestHandleLocalEvent(t *testing.T) { // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) t.Run("correct OperatorAdded event", func(t *testing.T) { @@ -46,7 +46,7 @@ func TestHandleLocalEvent(t *testing.T) { defer cancel() logger := zaptest.NewLogger(t) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } @@ -73,7 +73,7 @@ func TestHandleLocalEvent(t *testing.T) { defer cancel() logger := zaptest.NewLogger(t) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } diff --git a/eth/eventhandler/task.go b/eth/eventhandler/task.go index 3e825140b8..f6e2894fa8 100644 --- a/eth/eventhandler/task.go +++ b/eth/eventhandler/task.go @@ -1,9 +1,10 @@ package eventhandler import ( + spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" - ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/protocol/v2/types" ) type Task interface { @@ -11,15 +12,15 @@ type Task interface { } type startValidatorExecutor interface { - StartValidator(share *ssvtypes.SSVShare) error + StartValidator(share *types.SSVShare) error } type StartValidatorTask struct { executor startValidatorExecutor - share *ssvtypes.SSVShare + share *types.SSVShare } -func NewStartValidatorTask(executor 
startValidatorExecutor, share *ssvtypes.SSVShare) *StartValidatorTask { +func NewStartValidatorTask(executor startValidatorExecutor, share *types.SSVShare) *StartValidatorTask { return &StartValidatorTask{ executor: executor, share: share, @@ -31,41 +32,41 @@ func (t StartValidatorTask) Execute() error { } type stopValidatorExecutor interface { - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error } type StopValidatorTask struct { - executor stopValidatorExecutor - publicKey []byte + executor stopValidatorExecutor + pubKey spectypes.ValidatorPK } -func NewStopValidatorTask(executor stopValidatorExecutor, publicKey []byte) *StopValidatorTask { +func NewStopValidatorTask(executor stopValidatorExecutor, pubKey spectypes.ValidatorPK) *StopValidatorTask { return &StopValidatorTask{ - executor: executor, - publicKey: publicKey, + executor: executor, + pubKey: pubKey, } } func (t StopValidatorTask) Execute() error { - return t.executor.StopValidator(t.publicKey) + return t.executor.StopValidator(t.pubKey) } type liquidateClusterExecutor interface { - LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error + LiquidateCluster(owner ethcommon.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error } type LiquidateClusterTask struct { executor liquidateClusterExecutor owner ethcommon.Address - operatorIDs []uint64 - toLiquidate []*ssvtypes.SSVShare + operatorIDs []spectypes.OperatorID + toLiquidate []*types.SSVShare } func NewLiquidateClusterTask( executor liquidateClusterExecutor, owner ethcommon.Address, - operatorIDs []uint64, - toLiquidate []*ssvtypes.SSVShare, + operatorIDs []spectypes.OperatorID, + toLiquidate []*types.SSVShare, ) *LiquidateClusterTask { return &LiquidateClusterTask{ executor: executor, @@ -80,21 +81,21 @@ func (t LiquidateClusterTask) Execute() error { } type reactivateClusterExecutor interface { - ReactivateCluster(owner ethcommon.Address, 
operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error + ReactivateCluster(owner ethcommon.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error } type ReactivateClusterTask struct { executor reactivateClusterExecutor owner ethcommon.Address - operatorIDs []uint64 - toReactivate []*ssvtypes.SSVShare + operatorIDs []spectypes.OperatorID + toReactivate []*types.SSVShare } func NewReactivateClusterTask( executor reactivateClusterExecutor, owner ethcommon.Address, - operatorIDs []uint64, - toReactivate []*ssvtypes.SSVShare, + operatorIDs []spectypes.OperatorID, + toReactivate []*types.SSVShare, ) *ReactivateClusterTask { return &ReactivateClusterTask{ executor: executor, diff --git a/eth/eventhandler/task_executor_test.go b/eth/eventhandler/task_executor_test.go index 8792aadc91..a735c53dc9 100644 --- a/eth/eventhandler/task_executor_test.go +++ b/eth/eventhandler/task_executor_test.go @@ -3,9 +3,10 @@ package eventhandler import ( "context" "encoding/binary" - "github.com/golang/mock/gomock" "testing" + "github.com/golang/mock/gomock" + spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -48,10 +49,10 @@ func TestExecuteTask(t *testing.T) { defer cancel() // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) - eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, ops[0], true) + eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, nil, ops[0], true) require.NoError(t, err) t.Run("test AddValidator task execution - not started", func(t *testing.T) { @@ -145,10 +146,10 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) { defer cancel() // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := 
setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } @@ -189,7 +190,7 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) { } happyFlow := []string{ "successfully setup operator keys", - "setting validator controller", + "setting up validator controller", "malformed event: failed to verify signature", "processed events from block", } diff --git a/eth/eventsyncer/event_syncer_test.go b/eth/eventsyncer/event_syncer_test.go index 4cd2e73e68..9b500fe091 100644 --- a/eth/eventsyncer/event_syncer_test.go +++ b/eth/eventsyncer/event_syncer_test.go @@ -11,6 +11,7 @@ import ( "github.com/bloxapp/ssv/eth/contract" "github.com/bloxapp/ssv/eth/simulator" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/ethereum/go-ethereum/accounts/abi" @@ -152,6 +153,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger) *e DB: db, RegistryStorage: nodeStorage, OperatorData: operatorData, + ValidatorsMap: validatorsmap.New(ctx), }) contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) diff --git a/eth/executionclient/execution_client_test.go b/eth/executionclient/execution_client_test.go index 823515c52b..4fed0795c3 100644 --- a/eth/executionclient/execution_client_test.go +++ b/eth/executionclient/execution_client_test.go @@ -67,7 +67,7 @@ func TestFetchHistoricalLogs(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) parsed, _ := abi.JSON(strings.NewReader(callableAbi)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) @@ -131,7 +131,7 @@ func TestStreamLogs(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + 
strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -215,7 +215,7 @@ func TestFetchLogsInBatches(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -325,7 +325,7 @@ func TestChainReorganizationLogs(t *testing.T) { // defer rpcServer.Stop() // defer httpsrv.Close() - // addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + // addr := httpToWebSocketURL(httpsrv.URL) // // 1. // parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -417,7 +417,7 @@ func TestSimSSV(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) parsed, _ := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) @@ -584,3 +584,7 @@ func TestSimSSV(t *testing.T) { require.NoError(t, client.Close()) require.NoError(t, sim.Close()) } + +func httpToWebSocketURL(url string) string { + return "ws:" + strings.TrimPrefix(url, "http:") +} diff --git a/eth/simulator/simcontract/simcontract.go b/eth/simulator/simcontract/simcontract.go index 9da8921e7a..2877c65b29 100644 --- a/eth/simulator/simcontract/simcontract.go +++ b/eth/simulator/simcontract/simcontract.go @@ -41,7 +41,7 @@ type CallableCluster struct { // SimcontractMetaData contains all meta data concerning the Simcontract contract. 
var SimcontractMetaData = &bind.MetaData{ ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ClusterLiquidated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ClusterReactivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"recipientAddress\",\"type\":\"address\"}],\"name\":\"FeeRecipientAddressUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"},{\"indexed\":true,\"internalType
\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"OperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"}],\"name\":\"OperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"shares\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ValidatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\
"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ValidatorRemoved\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"clusterOwner\",\"type\":\"address\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"liquidate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"reactivate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"registerOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"
publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"internalType\":\"bytes\",\"name\":\"sharesData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"registerValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"}],\"name\":\"removeOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"removeValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipientAddress\",\"type\":\"address\"}],\"name\":\"setFeeRecipientAddress\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x608060405260008060006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555034801561003957600080fd5b50610f40806100496000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80635fec6dd01161005b5780635fec6dd0146100d6578063bf0f2fb2146100f2578063dbcdc2cc1461010e578063ff212c5c1461012a5761007d565b806306e8fb9c1461008257806312b3fc191461009e5780632e168e0e146100ba575b600080fd5b61009c60048036038101906100979190610740565b610146565b005b6100b860048036038101906100b3919061086f565b6101a7565b005b6100d460048036038101906100cf9190610904565b610204565b005b6100f060048036038101906100eb9190610931565b61023e565b005b61010c60048036038101906101079190610a03565b610296565b005b61012860048036038101906101239190610a72565b6102eb565b005b610144600480360381019061013f9190610a9f565b61033c565b005b3373ffffffffffffffffffffffffffffffffffffffff167f48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e586898988888760405161019696959493929190610c9f565b60405180910390a250505050505050565b3373ffffffffffffffffffffffffffffffffffffffff167fccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e84848888866040516101f5959493929190610d89565b60405180910390a25050505050565b8067ffffffffffffffff167f0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e60405160405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167fc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b685985858460405161028893929190610dd2565b60405180910390a250505050565b8273ffffffffffffffffffffffffffffffffffffffff167f1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e68883836040516102de929190610e04565b60405180910390a2505050565b3373ffffffffffffffffffffffffffffffffffffffff167f259235c230d57def1521657e7c7951d3b385e76193378bc87ef6b56bc2ec3548826040516103319190610e43565b60405180910390a250565b60016000808282829054906101000a900467ffffffffffffffff166103619190610e8d565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055503373fffffffffffffffffffffffffffffffffffffff
f1660008054906101000a900467ffffffffffffffff1667ffffffffffffffff167fd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f48585856040516103f093929190610ed8565b60405180910390a3505050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f84011261043657610435610411565b5b8235905067ffffffffffffffff81111561045357610452610416565b5b60208301915083600182028301111561046f5761046e61041b565b5b9250929050565b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6104bf82610476565b810181811067ffffffffffffffff821117156104de576104dd610487565b5b80604052505050565b60006104f16103fd565b90506104fd82826104b6565b919050565b600067ffffffffffffffff82111561051d5761051c610487565b5b602082029050602081019050919050565b600067ffffffffffffffff82169050919050565b61054b8161052e565b811461055657600080fd5b50565b60008135905061056881610542565b92915050565b600061058161057c84610502565b6104e7565b905080838252602082019050602084028301858111156105a4576105a361041b565b5b835b818110156105cd57806105b98882610559565b8452602084019350506020810190506105a6565b5050509392505050565b600082601f8301126105ec576105eb610411565b5b81356105fc84826020860161056e565b91505092915050565b6000819050919050565b61061881610605565b811461062357600080fd5b50565b6000813590506106358161060f565b92915050565b600080fd5b600063ffffffff82169050919050565b61065981610640565b811461066457600080fd5b50565b60008135905061067681610650565b92915050565b60008115159050919050565b6106918161067c565b811461069c57600080fd5b50565b6000813590506106ae81610688565b92915050565b600060a082840312156106ca576106c961063b565b5b6106d460a06104e7565b905060006106e484828501610667565b60008301525060206106f884828501610559565b602083015250604061070c84828501610559565b60408301525060606107208482850161069f565b606083015250608061073484828501610626565b60808301525092915050565b6000806000806000806000610120888a0312156107605761075f610407565b5b600088013567ffffffffffffffff81111561077e5761077d61040c565b5b61078a8a828b0
1610420565b9750975050602088013567ffffffffffffffff8111156107ad576107ac61040c565b5b6107b98a828b016105d7565b955050604088013567ffffffffffffffff8111156107da576107d961040c565b5b6107e68a828b01610420565b945094505060606107f98a828b01610626565b925050608061080a8a828b016106b4565b91505092959891949750929550565b60008083601f84011261082f5761082e610411565b5b8235905067ffffffffffffffff81111561084c5761084b610416565b5b6020830191508360208202830111156108685761086761041b565b5b9250929050565b600080600080600060e0868803121561088b5761088a610407565b5b600086013567ffffffffffffffff8111156108a9576108a861040c565b5b6108b588828901610420565b9550955050602086013567ffffffffffffffff8111156108d8576108d761040c565b5b6108e488828901610819565b935093505060406108f7888289016106b4565b9150509295509295909350565b60006020828403121561091a57610919610407565b5b600061092884828501610559565b91505092915050565b60008060008060e0858703121561094b5761094a610407565b5b600085013567ffffffffffffffff8111156109695761096861040c565b5b61097587828801610819565b9450945050602061098887828801610626565b9250506040610999878288016106b4565b91505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006109d0826109a5565b9050919050565b6109e0816109c5565b81146109eb57600080fd5b50565b6000813590506109fd816109d7565b92915050565b600080600060e08486031215610a1c57610a1b610407565b5b6000610a2a868287016109ee565b935050602084013567ffffffffffffffff811115610a4b57610a4a61040c565b5b610a57868287016105d7565b9250506040610a68868287016106b4565b9150509250925092565b600060208284031215610a8857610a87610407565b5b6000610a96848285016109ee565b91505092915050565b600080600060408486031215610ab857610ab7610407565b5b600084013567ffffffffffffffff811115610ad657610ad561040c565b5b610ae286828701610420565b93509350506020610af586828701610626565b9150509250925092565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610b348161052e565b82525050565b6000610b468383610b2b565b60208301905092915050565b6000602082019050919050565b6000610b6a82610aff565b610b7
48185610b0a565b9350610b7f83610b1b565b8060005b83811015610bb0578151610b978882610b3a565b9750610ba283610b52565b925050600181019050610b83565b5085935050505092915050565b600082825260208201905092915050565b82818337600083830152505050565b6000610be98385610bbd565b9350610bf6838584610bce565b610bff83610476565b840190509392505050565b610c1381610640565b82525050565b610c228161067c565b82525050565b610c3181610605565b82525050565b60a082016000820151610c4d6000850182610c0a565b506020820151610c606020850182610b2b565b506040820151610c736040850182610b2b565b506060820151610c866060850182610c19565b506080820151610c996080850182610c28565b50505050565b6000610100820190508181036000830152610cba8189610b5f565b90508181036020830152610ccf818789610bdd565b90508181036040830152610ce4818587610bdd565b9050610cf36060830184610c37565b979650505050505050565b6000819050919050565b6000610d176020840184610559565b905092915050565b6000602082019050919050565b6000610d388385610b0a565b9350610d4382610cfe565b8060005b85811015610d7c57610d598284610d08565b610d638882610b3a565b9750610d6e83610d1f565b925050600181019050610d47565b5085925050509392505050565b600060e0820190508181036000830152610da4818789610d2c565b90508181036020830152610db9818587610bdd565b9050610dc86040830184610c37565b9695505050505050565b600060c0820190508181036000830152610ded818587610d2c565b9050610dfc6020830184610c37565b949350505050565b600060c0820190508181036000830152610e1e8185610b5f565b9050610e2d6020830184610c37565b9392505050565b610e3d816109c5565b82525050565b6000602082019050610e586000830184610e34565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610e988261052e565b9150610ea38361052e565b9250828201905067ffffffffffffffff811115610ec357610ec2610e5e565b5b92915050565b610ed281610605565b82525050565b60006040820190508181036000830152610ef3818587610bdd565b9050610f026020830184610ec9565b94935050505056fea2646970667358221220a849e84b21b5cf14144f9145592d2e879b8dfd174c980e9d839aabab095d209064736f6c63430008120033", + Bin: 
"0x608060405260008060006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555034801561003957600080fd5b50610f40806100496000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80635fec6dd01161005b5780635fec6dd0146100d6578063bf0f2fb2146100f2578063dbcdc2cc1461010e578063ff212c5c1461012a5761007d565b806306e8fb9c1461008257806312b3fc191461009e5780632e168e0e146100ba575b600080fd5b61009c60048036038101906100979190610740565b610146565b005b6100b860048036038101906100b3919061086f565b6101a7565b005b6100d460048036038101906100cf9190610904565b610204565b005b6100f060048036038101906100eb9190610931565b61023e565b005b61010c60048036038101906101079190610a03565b610296565b005b61012860048036038101906101239190610a72565b6102eb565b005b610144600480360381019061013f9190610a9f565b61033c565b005b3373ffffffffffffffffffffffffffffffffffffffff167f48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e586898988888760405161019696959493929190610c9f565b60405180910390a250505050505050565b3373ffffffffffffffffffffffffffffffffffffffff167fccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e84848888866040516101f5959493929190610d89565b60405180910390a25050505050565b8067ffffffffffffffff167f0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e60405160405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167fc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b685985858460405161028893929190610dd2565b60405180910390a250505050565b3373ffffffffffffffffffffffffffffffffffffffff167f1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e68883836040516102de929190610e04565b60405180910390a2505050565b3373ffffffffffffffffffffffffffffffffffffffff167f259235c230d57def1521657e7c7951d3b385e76193378bc87ef6b56bc2ec3548826040516103319190610e43565b60405180910390a250565b60016000808282829054906101000a900467ffffffffffffffff166103619190610e8d565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055503373fffffffffffffffffffffffffffffffffffffff
f1660008054906101000a900467ffffffffffffffff1667ffffffffffffffff167fd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f48585856040516103f093929190610ed8565b60405180910390a3505050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f84011261043657610435610411565b5b8235905067ffffffffffffffff81111561045357610452610416565b5b60208301915083600182028301111561046f5761046e61041b565b5b9250929050565b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6104bf82610476565b810181811067ffffffffffffffff821117156104de576104dd610487565b5b80604052505050565b60006104f16103fd565b90506104fd82826104b6565b919050565b600067ffffffffffffffff82111561051d5761051c610487565b5b602082029050602081019050919050565b600067ffffffffffffffff82169050919050565b61054b8161052e565b811461055657600080fd5b50565b60008135905061056881610542565b92915050565b600061058161057c84610502565b6104e7565b905080838252602082019050602084028301858111156105a4576105a361041b565b5b835b818110156105cd57806105b98882610559565b8452602084019350506020810190506105a6565b5050509392505050565b600082601f8301126105ec576105eb610411565b5b81356105fc84826020860161056e565b91505092915050565b6000819050919050565b61061881610605565b811461062357600080fd5b50565b6000813590506106358161060f565b92915050565b600080fd5b600063ffffffff82169050919050565b61065981610640565b811461066457600080fd5b50565b60008135905061067681610650565b92915050565b60008115159050919050565b6106918161067c565b811461069c57600080fd5b50565b6000813590506106ae81610688565b92915050565b600060a082840312156106ca576106c961063b565b5b6106d460a06104e7565b905060006106e484828501610667565b60008301525060206106f884828501610559565b602083015250604061070c84828501610559565b60408301525060606107208482850161069f565b606083015250608061073484828501610626565b60808301525092915050565b6000806000806000806000610120888a0312156107605761075f610407565b5b600088013567ffffffffffffffff81111561077e5761077d61040c565b5b61078a8a828b0
1610420565b9750975050602088013567ffffffffffffffff8111156107ad576107ac61040c565b5b6107b98a828b016105d7565b955050604088013567ffffffffffffffff8111156107da576107d961040c565b5b6107e68a828b01610420565b945094505060606107f98a828b01610626565b925050608061080a8a828b016106b4565b91505092959891949750929550565b60008083601f84011261082f5761082e610411565b5b8235905067ffffffffffffffff81111561084c5761084b610416565b5b6020830191508360208202830111156108685761086761041b565b5b9250929050565b600080600080600060e0868803121561088b5761088a610407565b5b600086013567ffffffffffffffff8111156108a9576108a861040c565b5b6108b588828901610420565b9550955050602086013567ffffffffffffffff8111156108d8576108d761040c565b5b6108e488828901610819565b935093505060406108f7888289016106b4565b9150509295509295909350565b60006020828403121561091a57610919610407565b5b600061092884828501610559565b91505092915050565b60008060008060e0858703121561094b5761094a610407565b5b600085013567ffffffffffffffff8111156109695761096861040c565b5b61097587828801610819565b9450945050602061098887828801610626565b9250506040610999878288016106b4565b91505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006109d0826109a5565b9050919050565b6109e0816109c5565b81146109eb57600080fd5b50565b6000813590506109fd816109d7565b92915050565b600080600060e08486031215610a1c57610a1b610407565b5b6000610a2a868287016109ee565b935050602084013567ffffffffffffffff811115610a4b57610a4a61040c565b5b610a57868287016105d7565b9250506040610a68868287016106b4565b9150509250925092565b600060208284031215610a8857610a87610407565b5b6000610a96848285016109ee565b91505092915050565b600080600060408486031215610ab857610ab7610407565b5b600084013567ffffffffffffffff811115610ad657610ad561040c565b5b610ae286828701610420565b93509350506020610af586828701610626565b9150509250925092565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610b348161052e565b82525050565b6000610b468383610b2b565b60208301905092915050565b6000602082019050919050565b6000610b6a82610aff565b610b7
48185610b0a565b9350610b7f83610b1b565b8060005b83811015610bb0578151610b978882610b3a565b9750610ba283610b52565b925050600181019050610b83565b5085935050505092915050565b600082825260208201905092915050565b82818337600083830152505050565b6000610be98385610bbd565b9350610bf6838584610bce565b610bff83610476565b840190509392505050565b610c1381610640565b82525050565b610c228161067c565b82525050565b610c3181610605565b82525050565b60a082016000820151610c4d6000850182610c0a565b506020820151610c606020850182610b2b565b506040820151610c736040850182610b2b565b506060820151610c866060850182610c19565b506080820151610c996080850182610c28565b50505050565b6000610100820190508181036000830152610cba8189610b5f565b90508181036020830152610ccf818789610bdd565b90508181036040830152610ce4818587610bdd565b9050610cf36060830184610c37565b979650505050505050565b6000819050919050565b6000610d176020840184610559565b905092915050565b6000602082019050919050565b6000610d388385610b0a565b9350610d4382610cfe565b8060005b85811015610d7c57610d598284610d08565b610d638882610b3a565b9750610d6e83610d1f565b925050600181019050610d47565b5085925050509392505050565b600060e0820190508181036000830152610da4818789610d2c565b90508181036020830152610db9818587610bdd565b9050610dc86040830184610c37565b9695505050505050565b600060c0820190508181036000830152610ded818587610d2c565b9050610dfc6020830184610c37565b949350505050565b600060c0820190508181036000830152610e1e8185610b5f565b9050610e2d6020830184610c37565b9392505050565b610e3d816109c5565b82525050565b6000602082019050610e586000830184610e34565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610e988261052e565b9150610ea38361052e565b9250828201905067ffffffffffffffff811115610ec357610ec2610e5e565b5b92915050565b610ed281610605565b82525050565b60006040820190508181036000830152610ef3818587610bdd565b9050610f026020830184610ec9565b94935050505056fea26469706673582212206464f7d32909b03e1e16f822f4ba73e56f9b875dfda6cb13f3fc97c182c5e43664736f6c63430008120033", } // SimcontractABI is the input ABI 
used to generate the binding from. diff --git a/eth/simulator/simcontract/simcontract.sol b/eth/simulator/simcontract/simcontract.sol index 23277e23e2..9325802822 100644 --- a/eth/simulator/simcontract/simcontract.sol +++ b/eth/simulator/simcontract/simcontract.sol @@ -52,20 +52,43 @@ contract Callable { _operatorId += 1; emit OperatorAdded(_operatorId, msg.sender, publicKey, fee); } - function removeOperator(uint64 operatorId) public {emit OperatorRemoved(operatorId);} + + function removeOperator(uint64 operatorId) public { + emit OperatorRemoved(operatorId); + } + function registerValidator( bytes calldata publicKey, uint64[] memory operatorIds, bytes calldata sharesData, uint256 amount, Cluster memory cluster - ) public { emit ValidatorAdded(msg.sender, operatorIds, publicKey, sharesData, cluster);} + ) public { + emit ValidatorAdded(msg.sender, operatorIds, publicKey, sharesData, cluster); + } + function removeValidator( bytes calldata publicKey, uint64[] calldata operatorIds, Cluster memory cluster - ) public {emit ValidatorRemoved(msg.sender, operatorIds, publicKey, cluster);} - function liquidate(address clusterOwner, uint64[] memory operatorIds, Cluster memory cluster) public {emit ClusterLiquidated(clusterOwner, operatorIds, cluster);} - function reactivate(uint64[] calldata operatorIds, uint256 amount, Cluster memory cluster) public {emit ClusterReactivated(msg.sender, operatorIds, cluster);} + ) public { + emit ValidatorRemoved(msg.sender, operatorIds, publicKey, cluster); + } + + function liquidate(address clusterOwner, + uint64[] memory operatorIds, + Cluster memory cluster + ) public { + emit ClusterLiquidated(msg.sender, operatorIds, cluster); + } + + function reactivate( + uint64[] calldata operatorIds, + uint256 amount, + Cluster memory cluster + ) public { + emit ClusterReactivated(msg.sender, operatorIds, cluster); + } + function setFeeRecipientAddress(address recipientAddress) public {emit FeeRecipientAddressUpdated(msg.sender, 
recipientAddress);} } diff --git a/go.mod b/go.mod index 5fa7730cf6..c40bccbcde 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.20 require ( github.com/aquasecurity/table v1.8.0 github.com/attestantio/go-eth2-client v0.16.3 - github.com/bloxapp/eth2-key-manager v1.3.1 - github.com/bloxapp/ssv-spec v0.3.1 + github.com/bloxapp/eth2-key-manager v1.3.2 + github.com/bloxapp/ssv-spec v0.3.4 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/xxhash/v2 v2.2.0 github.com/cornelk/hashmap v1.0.8 @@ -22,10 +22,12 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/herumi/bls-eth-go-binary v1.29.1 github.com/ilyakaznacheev/cleanenv v1.4.2 + github.com/jamiealquiza/tachymeter v2.0.0+incompatible github.com/jellydator/ttlcache/v3 v3.0.1 github.com/libp2p/go-libp2p v0.28.2 github.com/libp2p/go-libp2p-kad-dht v0.23.0 github.com/libp2p/go-libp2p-pubsub v0.9.3 + github.com/microsoft/go-crypto-openssl v0.2.8 github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multistream v0.4.1 github.com/patrickmn/go-cache v2.1.0+incompatible @@ -37,6 +39,7 @@ require ( github.com/sourcegraph/conc v0.3.0 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 + github.com/wealdtech/go-eth2-types/v2 v2.8.1 github.com/wealdtech/go-eth2-util v1.8.1 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3 go.uber.org/multierr v1.11.0 @@ -192,7 +195,6 @@ require ( github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/urfave/cli/v2 v2.24.1 // indirect github.com/wealdtech/go-bytesutil v1.2.1 // indirect - github.com/wealdtech/go-eth2-types/v2 v2.8.1 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect @@ -222,5 +224,3 @@ require ( replace github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 replace github.com/dgraph-io/ristretto => 
github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f - -replace github.com/bloxapp/ssv-spec => github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800 diff --git a/go.sum b/go.sum index 7b8753260f..5eb22ec3c0 100644 --- a/go.sum +++ b/go.sum @@ -54,10 +54,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bloxapp/eth2-key-manager v1.3.1 h1:1olQcOHRY2TN1o8JX9AN1siEIJXWnlM+BlknfBbXoo4= -github.com/bloxapp/eth2-key-manager v1.3.1/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= -github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800 h1:ikChvdYVw4GFSlnIS+u1qmNqOvgq2a2H3b2FZ44KBn8= -github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= +github.com/bloxapp/eth2-key-manager v1.3.2 h1:xzxwYQZr8DoQrkCBkTnSdDWqqoPq/iy5VoKLxfPf4IY= +github.com/bloxapp/eth2-key-manager v1.3.2/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= +github.com/bloxapp/ssv-spec v0.3.4 h1:uu1pAP8FBucGf1FGORjzqz7if0vWGRY5w6ILLhA7IuM= +github.com/bloxapp/ssv-spec v0.3.4/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= @@ -367,6 +367,8 @@ github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0Gqw github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 
h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jamiealquiza/tachymeter v2.0.0+incompatible h1:mGiF1DGo8l6vnGT8FXNNcIXht/YmjzfraiUprXYwJ6g= +github.com/jamiealquiza/tachymeter v2.0.0+incompatible/go.mod h1:Ayf6zPZKEnLsc3winWEXJRkTBhdHo58HODAu1oFJkYU= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= @@ -487,6 +489,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/microsoft/go-crypto-openssl v0.2.8 h1:16B6DVeBCimOAG0B92PSySOnVDq6Qr/siI3TyyMHXoI= +github.com/microsoft/go-crypto-openssl v0.2.8/go.mod h1:xOSmQnWz4xvNB2+KQN2g2UUwMG9vqDHBk9nk/NdmyRw= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= diff --git a/identity/store_test.go b/identity/store_test.go index 4fe7da535a..877eeac669 100644 --- a/identity/store_test.go +++ b/identity/store_test.go @@ -68,7 +68,7 @@ func TestSetupPrivateKey(t *testing.T) { require.NoError(t, err) require.NotNil(t, sk) - interfacePriv, err := commons.ConvertToInterfacePrivkey(privKey) + interfacePriv, err := commons.ECDSAPrivToInterface(privKey) require.NoError(t, err) b, err := interfacePriv.Raw() require.NoError(t, err) @@ -87,7 +87,7 @@ func TestSetupPrivateKey(t *testing.T) { 
return } if test.existKey != "" && test.passedKey == "" { // exist and not passed in env - interfacePriv, err := commons.ConvertToInterfacePrivkey(privateKey) + interfacePriv, err := commons.ECDSAPrivToInterface(privateKey) require.NoError(t, err) b, err := interfacePriv.Raw() require.NoError(t, err) @@ -95,7 +95,7 @@ func TestSetupPrivateKey(t *testing.T) { return } // not exist && passed and exist && passed - interfacePriv, err := commons.ConvertToInterfacePrivkey(privateKey) + interfacePriv, err := commons.ECDSAPrivToInterface(privateKey) require.NoError(t, err) b, err := interfacePriv.Raw() require.NoError(t, err) diff --git a/integration/qbft/tests/msg_router.go b/integration/qbft/tests/msg_router.go index bf3b667e98..dda7b7c243 100644 --- a/integration/qbft/tests/msg_router.go +++ b/integration/qbft/tests/msg_router.go @@ -1,21 +1,26 @@ package tests import ( - spectypes "github.com/bloxapp/ssv-spec/types" - protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" + "context" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" + protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" ) type msgRouter struct { + logger *zap.Logger validator *protocolvalidator.Validator } -func (m *msgRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { - m.validator.HandleMessage(logger, &message) +func (m *msgRouter) Route(_ context.Context, message *queue.DecodedSSVMessage) { + m.validator.HandleMessage(m.logger, message) } -func newMsgRouter(v *protocolvalidator.Validator) *msgRouter { +func newMsgRouter(logger *zap.Logger, v *protocolvalidator.Validator) *msgRouter { return &msgRouter{ validator: v, + logger: logger, } } diff --git a/integration/qbft/tests/round_change_test.go b/integration/qbft/tests/round_change_test.go index 65c6038e5f..4dbb839f5f 100644 --- a/integration/qbft/tests/round_change_test.go +++ b/integration/qbft/tests/round_change_test.go @@ -2,12 +2,13 @@ package tests import ( "testing" + "time" 
"github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" - protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" "github.com/stretchr/testify/require" + + protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) func TestRoundChange4CommitteeScenario(t *testing.T) { @@ -18,8 +19,8 @@ func TestRoundChange4CommitteeScenario(t *testing.T) { Duties: map[spectypes.OperatorID]DutyProperties{ 2: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: NoDelay}, 1: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: NoDelay}, - 3: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: roundtimer.RoundTimeout(1)}, - 4: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: roundtimer.RoundTimeout(1)}, + 3: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: 2 * time.Second}, + 4: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: 2 * time.Second}, }, ValidationFunctions: map[spectypes.OperatorID]func(*testing.T, int, *protocolstorage.StoredInstance){ 1: roundChangeValidator(), diff --git a/integration/qbft/tests/scenario_test.go b/integration/qbft/tests/scenario_test.go index 5fbf6c89b9..13f14d07f9 100644 --- a/integration/qbft/tests/scenario_test.go +++ b/integration/qbft/tests/scenario_test.go @@ -2,7 +2,6 @@ package tests import ( "context" - "fmt" "testing" "time" @@ -21,11 +20,9 @@ import ( "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/validator" protocolbeacon "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/sync/handlers" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" @@ -63,15 +60,6 @@ func (s *Scenario) Run(t *testing.T, role 
spectypes.BeaconRole) { for id := 1; id <= s.Committee; id++ { id := spectypes.OperatorID(id) s.validators[id] = createValidator(t, ctx, id, getKeySet(s.Committee), logger, s.shared.Nodes[id]) - - stores := newStores(logger) - s.shared.Nodes[id].RegisterHandlers(logger, protocolp2p.WithHandler( - protocolp2p.LastDecidedProtocol, - handlers.LastDecidedHandler(logger.Named(fmt.Sprintf("decided-handler-%d", id)), stores, s.shared.Nodes[id]), - ), protocolp2p.WithHandler( - protocolp2p.DecidedHistoryProtocol, - handlers.HistoryHandler(logger.Named(fmt.Sprintf("history-handler-%d", id)), stores, s.shared.Nodes[id], 25), - )) } //invoking duties @@ -84,7 +72,7 @@ func (s *Scenario) Run(t *testing.T, role spectypes.BeaconRole) { copy(pk[:], getKeySet(s.Committee).ValidatorPK.Serialize()) ssvMsg, err := validator.CreateDutyExecuteMsg(duty, pk, networkconfig.TestNetwork.Domain) require.NoError(t, err) - dec, err := queue.DecodeSSVMessage(logger, ssvMsg) + dec, err := queue.DecodeSSVMessage(ssvMsg) require.NoError(t, err) s.validators[id].Queues[role].Q.Push(dec) @@ -200,8 +188,9 @@ func createValidator(t *testing.T, pCtx context.Context, id spectypes.OperatorID require.NoError(t, err) options := protocolvalidator.Options{ - Storage: newStores(logger), - Network: node, + Storage: newStores(logger), + Network: node, + BeaconNetwork: networkconfig.TestNetwork.Beacon, SSVShare: &types.SSVShare{ Share: *testingShare(keySet, id), Metadata: types.Metadata{ @@ -218,7 +207,7 @@ func createValidator(t *testing.T, pCtx context.Context, id spectypes.OperatorID options.DutyRunners = validator.SetupRunners(ctx, logger, options) val := protocolvalidator.NewValidator(ctx, cancel, options) - node.UseMessageRouter(newMsgRouter(val)) + node.UseMessageRouter(newMsgRouter(logger, val)) started, err := val.Start(logger) require.NoError(t, err) require.True(t, started) diff --git a/integration/qbft/tests/setup_test.go b/integration/qbft/tests/setup_test.go index f8c4222dbc..d319c44793 100644 --- 
a/integration/qbft/tests/setup_test.go +++ b/integration/qbft/tests/setup_test.go @@ -42,7 +42,11 @@ func TestMain(m *testing.M) { types.SetDefaultDomain(testingutils.TestingSSVDomainType) - ln, err := p2pv1.CreateAndStartLocalNet(ctx, logger, maxSupportedCommittee, maxSupportedQuorum, false) + ln, err := p2pv1.CreateAndStartLocalNet(ctx, logger, p2pv1.LocalNetOptions{ + Nodes: maxSupportedCommittee, + MinConnected: maxSupportedQuorum, + UseDiscv5: false, + }) if err != nil { logger.Fatal("error creating and start local net", zap.Error(err)) return diff --git a/logging/fields/fields.go b/logging/fields/fields.go index 6b1de4ffc5..94ca995621 100644 --- a/logging/fields/fields.go +++ b/logging/fields/fields.go @@ -64,9 +64,11 @@ const ( FieldName = "name" FieldNetwork = "network" FieldOperatorId = "operator_id" + FieldOperatorIDs = "operator_ids" FieldOperatorPubKey = "operator_pubkey" FieldOwnerAddress = "owner_address" FieldPeerID = "peer_id" + FieldPeerScore = "peer_score" FieldPrivKey = "privkey" FieldPubKey = "pubkey" FieldRole = "role" @@ -154,6 +156,10 @@ func PeerID(val peer.ID) zapcore.Field { return zap.Stringer(FieldPeerID, val) } +func PeerScore(val float64) zapcore.Field { + return zap.Stringer(FieldPeerScore, stringer.Float64Stringer{Val: val}) +} + func BindIP(val net.IP) zapcore.Field { return zap.Stringer(FieldBindIP, val) } @@ -190,6 +196,10 @@ func OperatorID(operatorId spectypes.OperatorID) zap.Field { return zap.Uint64(FieldOperatorId, operatorId) } +func OperatorIDs(operatorIDs []spectypes.OperatorID) zap.Field { + return zap.Uint64s(FieldOperatorIDs, operatorIDs) +} + func OperatorIDStr(operatorId string) zap.Field { return zap.String(FieldOperatorId, operatorId) } diff --git a/logging/names.go b/logging/names.go index 5a23d12da9..298f6a9ee0 100644 --- a/logging/names.go +++ b/logging/names.go @@ -23,4 +23,5 @@ const ( NamePubsubTrace = "PubsubTrace" NameScoreInspector = "ScoreInspector" NameEventHandler = "EventHandler" + NameDutyFetcher = 
"DutyFetcher" ) diff --git a/logging/testing.go b/logging/testing.go index 6b6abd8326..b7617c2680 100644 --- a/logging/testing.go +++ b/logging/testing.go @@ -5,16 +5,17 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) func TestLogger(t *testing.T) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", nil) + err := SetGlobalLogger(zapcore.DebugLevel.String(), "capital", "console", nil) require.NoError(t, err) return zap.L().Named(t.Name()) } func BenchLogger(b *testing.B) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", nil) + err := SetGlobalLogger(zapcore.DebugLevel.String(), "capital", "console", nil) require.NoError(b, err) return zap.L().Named(b.Name()) } diff --git a/message/validation/consensus_state.go b/message/validation/consensus_state.go new file mode 100644 index 0000000000..5f8869e6d4 --- /dev/null +++ b/message/validation/consensus_state.go @@ -0,0 +1,34 @@ +package validation + +import ( + "github.com/attestantio/go-eth2-client/spec/phase0" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/cornelk/hashmap" +) + +// ConsensusID uniquely identifies a public key and role pair to keep track of state. +type ConsensusID struct { + PubKey phase0.BLSPubKey + Role spectypes.BeaconRole +} + +// ConsensusState keeps track of the signers for a given public key and role. +type ConsensusState struct { + // TODO: consider evicting old data to avoid excessive memory consumption + Signers *hashmap.Map[spectypes.OperatorID, *SignerState] +} + +// GetSignerState retrieves the state for the given signer. +// Returns nil if the signer is not found. +func (cs *ConsensusState) GetSignerState(signer spectypes.OperatorID) *SignerState { + signerState, _ := cs.Signers.Get(signer) + return signerState +} + +// CreateSignerState initializes and sets a new SignerState for the given signer. 
+func (cs *ConsensusState) CreateSignerState(signer spectypes.OperatorID) *SignerState { + signerState := &SignerState{} + cs.Signers.Set(signer, signerState) + + return signerState +} diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go new file mode 100644 index 0000000000..674f148ea6 --- /dev/null +++ b/message/validation/consensus_validation.go @@ -0,0 +1,431 @@ +package validation + +// consensus_validation.go contains methods for validating consensus messages + +import ( + "bytes" + "fmt" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "golang.org/x/exp/slices" + + "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +func (mv *messageValidator) validateConsensusMessage( + share *ssvtypes.SSVShare, + signedMsg *specqbft.SignedMessage, + messageID spectypes.MessageID, + receivedAt time.Time, + signatureVerifier func() error, +) (ConsensusDescriptor, phase0.Slot, error) { + var consensusDescriptor ConsensusDescriptor + + if mv.inCommittee(share) { + mv.metrics.InCommitteeMessage(spectypes.SSVConsensusMsgType, mv.isDecidedMessage(signedMsg)) + } else { + mv.metrics.NonCommitteeMessage(spectypes.SSVConsensusMsgType, mv.isDecidedMessage(signedMsg)) + } + + msgSlot := phase0.Slot(signedMsg.Message.Height) + msgRound := signedMsg.Message.Round + + consensusDescriptor = ConsensusDescriptor{ + QBFTMessageType: signedMsg.Message.MsgType, + Round: msgRound, + Signers: signedMsg.Signers, + Committee: share.Committee, + } + + mv.metrics.ConsensusMsgType(signedMsg.Message.MsgType, len(signedMsg.Signers)) + + if messageID.GetRoleType() == spectypes.BNRoleValidatorRegistration { + return consensusDescriptor, msgSlot, ErrConsensusValidatorRegistration + } + + if err := 
mv.validateSignatureFormat(signedMsg.Signature); err != nil { + return consensusDescriptor, msgSlot, err + } + + if !mv.validQBFTMsgType(signedMsg.Message.MsgType) { + return consensusDescriptor, msgSlot, ErrUnknownQBFTMessageType + } + + if err := mv.validConsensusSigners(share, signedMsg); err != nil { + return consensusDescriptor, msgSlot, err + } + + role := messageID.GetRoleType() + + if err := mv.validateSlotTime(msgSlot, role, receivedAt); err != nil { + return consensusDescriptor, msgSlot, err + } + + if maxRound := mv.maxRound(role); msgRound > maxRound { + err := ErrRoundTooHigh + err.got = fmt.Sprintf("%v (%v role)", msgRound, role) + err.want = fmt.Sprintf("%v (%v role)", maxRound, role) + return consensusDescriptor, msgSlot, err + } + + slotStartTime := mv.netCfg.Beacon.GetSlotStartTime(msgSlot) /*. + Add(mv.waitAfterSlotStart(role))*/ // TODO: not supported yet because first round is non-deterministic now + + sinceSlotStart := time.Duration(0) + estimatedRound := specqbft.FirstRound + if receivedAt.After(slotStartTime) { + sinceSlotStart = receivedAt.Sub(slotStartTime) + estimatedRound = mv.currentEstimatedRound(sinceSlotStart) + } + + // TODO: lowestAllowed is not supported yet because first round is non-deterministic now + lowestAllowed := /*estimatedRound - allowedRoundsInPast*/ specqbft.FirstRound + highestAllowed := estimatedRound + allowedRoundsInFuture + + if msgRound < lowestAllowed || msgRound > highestAllowed { + err := ErrEstimatedRoundTooFar + err.got = fmt.Sprintf("%v (%v role)", msgRound, role) + err.want = fmt.Sprintf("between %v and %v (%v role) / %v passed", lowestAllowed, highestAllowed, role, sinceSlotStart) + return consensusDescriptor, msgSlot, err + } + + if mv.hasFullData(signedMsg) { + hashedFullData, err := specqbft.HashDataRoot(signedMsg.FullData) + if err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("hash data root: %w", err) + } + + if hashedFullData != signedMsg.Message.Root { + return consensusDescriptor, 
msgSlot, ErrInvalidHash + } + } + + if err := mv.validateBeaconDuty(messageID.GetRoleType(), msgSlot, share); err != nil { + return consensusDescriptor, msgSlot, err + } + + state := mv.consensusState(messageID) + for _, signer := range signedMsg.Signers { + if err := mv.validateSignerBehaviorConsensus(state, signer, share, messageID, signedMsg); err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("bad signer behavior: %w", err) + } + } + + if signatureVerifier != nil { + if err := signatureVerifier(); err != nil { + return consensusDescriptor, msgSlot, err + } + } + + for _, signer := range signedMsg.Signers { + signerState := state.GetSignerState(signer) + if signerState == nil { + signerState = state.CreateSignerState(signer) + } + if msgSlot > signerState.Slot { + newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) + signerState.ResetSlot(msgSlot, msgRound, newEpoch) + } else if msgSlot == signerState.Slot && msgRound > signerState.Round { + signerState.ResetRound(msgRound) + } + + if mv.hasFullData(signedMsg) && signerState.ProposalData == nil { + signerState.ProposalData = signedMsg.FullData + } + + signerState.MessageCounts.RecordConsensusMessage(signedMsg) + } + + return consensusDescriptor, msgSlot, nil +} + +func (mv *messageValidator) validateJustifications( + share *ssvtypes.SSVShare, + signedMsg *specqbft.SignedMessage, +) error { + pj, err := signedMsg.Message.GetPrepareJustifications() + if err != nil { + e := ErrMalformedPrepareJustifications + e.innerErr = err + return e + } + + if len(pj) != 0 && signedMsg.Message.MsgType != specqbft.ProposalMsgType { + e := ErrUnexpectedPrepareJustifications + e.got = signedMsg.Message.MsgType + return e + } + + rcj, err := signedMsg.Message.GetRoundChangeJustifications() + if err != nil { + e := ErrMalformedRoundChangeJustifications + e.innerErr = err + return e + } + + if len(rcj) != 0 && signedMsg.Message.MsgType != 
specqbft.ProposalMsgType && signedMsg.Message.MsgType != specqbft.RoundChangeMsgType { + e := ErrUnexpectedRoundChangeJustifications + e.got = signedMsg.Message.MsgType + return e + } + + if signedMsg.Message.MsgType == specqbft.ProposalMsgType { + cfg := newQBFTConfig(mv.netCfg.Domain) + + if err := instance.IsProposalJustification( + cfg, + share, + rcj, + pj, + signedMsg.Message.Height, + signedMsg.Message.Round, + signedMsg.FullData, + ); err != nil { + e := ErrInvalidJustifications + e.innerErr = err + return e + } + } + + return nil +} + +func (mv *messageValidator) validateSignerBehaviorConsensus( + state *ConsensusState, + signer spectypes.OperatorID, + share *ssvtypes.SSVShare, + msgID spectypes.MessageID, + signedMsg *specqbft.SignedMessage, +) error { + signerState := state.GetSignerState(signer) + + if signerState == nil { + return mv.validateJustifications(share, signedMsg) + } + + msgSlot := phase0.Slot(signedMsg.Message.Height) + msgRound := signedMsg.Message.Round + + if msgSlot < signerState.Slot { + // Signers aren't allowed to decrease their slot. + // If they've sent a future message due to clock error, + // this should be caught by the earlyMessage check. + err := ErrSlotAlreadyAdvanced + err.want = signerState.Slot + err.got = msgSlot + return err + } + + if msgSlot == signerState.Slot && msgRound < signerState.Round { + // Signers aren't allowed to decrease their round. + // If they've sent a future message due to clock error, + // they'd have to wait for the next slot/round to be accepted. 
+ err := ErrRoundAlreadyAdvanced + err.want = signerState.Round + err.got = msgRound + return err + } + + newDutyInSameEpoch := false + if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) { + newDutyInSameEpoch = true + } + + if err := mv.validateDutyCount(signerState, msgID, newDutyInSameEpoch); err != nil { + return err + } + + if msgSlot == signerState.Slot && msgRound == signerState.Round { + if mv.hasFullData(signedMsg) && signerState.ProposalData != nil && !bytes.Equal(signerState.ProposalData, signedMsg.FullData) { + return ErrDuplicatedProposalWithDifferentData + } + + limits := maxMessageCounts(len(share.Committee)) + if err := signerState.MessageCounts.ValidateConsensusMessage(signedMsg, limits); err != nil { + return err + } + } + + return mv.validateJustifications(share, signedMsg) +} + +func (mv *messageValidator) validateDutyCount( + state *SignerState, + msgID spectypes.MessageID, + newDutyInSameEpoch bool, +) error { + switch msgID.GetRoleType() { + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator, spectypes.BNRoleValidatorRegistration: + limit := maxDutiesPerEpoch + + if sameSlot := !newDutyInSameEpoch; sameSlot { + limit++ + } + + if state.EpochDuties >= limit { + err := ErrTooManyDutiesPerEpoch + err.got = fmt.Sprintf("%v (role %v)", state.EpochDuties, msgID.GetRoleType()) + err.want = fmt.Sprintf("less than %v", maxDutiesPerEpoch) + return err + } + + return nil + } + + return nil +} + +func (mv *messageValidator) validateBeaconDuty( + role spectypes.BeaconRole, + slot phase0.Slot, + share *ssvtypes.SSVShare, +) error { + switch role { + case spectypes.BNRoleProposer: + if share.Metadata.BeaconMetadata == nil { + return ErrNoShareMetadata + } + + epoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(slot) + if mv.dutyStore != nil && mv.dutyStore.Proposer.ValidatorDuty(epoch, slot, share.Metadata.BeaconMetadata.Index) == nil { + return ErrNoDuty + } + + 
return nil + + case spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + if share.Metadata.BeaconMetadata == nil { + return ErrNoShareMetadata + } + + period := mv.netCfg.Beacon.EstimatedSyncCommitteePeriodAtEpoch(mv.netCfg.Beacon.EstimatedEpochAtSlot(slot)) + if mv.dutyStore != nil && mv.dutyStore.SyncCommittee.Duty(period, share.Metadata.BeaconMetadata.Index) == nil { + return ErrNoDuty + } + + return nil + } + + return nil +} + +func (mv *messageValidator) hasFullData(signedMsg *specqbft.SignedMessage) bool { + return (signedMsg.Message.MsgType == specqbft.ProposalMsgType || + signedMsg.Message.MsgType == specqbft.RoundChangeMsgType || + mv.isDecidedMessage(signedMsg)) && len(signedMsg.FullData) != 0 // TODO: more complex check of FullData +} + +func (mv *messageValidator) isDecidedMessage(signedMsg *specqbft.SignedMessage) bool { + return signedMsg.Message.MsgType == specqbft.CommitMsgType && len(signedMsg.Signers) > 1 +} + +func (mv *messageValidator) maxRound(role spectypes.BeaconRole) specqbft.Round { + switch role { + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator: // TODO: check if value for aggregator is correct as there are messages on stage exceeding the limit + return 12 // TODO: consider calculating based on quick timeout and slow timeout + case spectypes.BNRoleProposer, spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + return 6 + case spectypes.BNRoleValidatorRegistration: + return 0 + default: + panic("unknown role") + } +} + +func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) specqbft.Round { + if currentQuickRound := specqbft.FirstRound + specqbft.Round(sinceSlotStart/roundtimer.QuickTimeout); currentQuickRound <= roundtimer.QuickTimeoutThreshold { + return currentQuickRound + } + + sinceFirstSlowRound := sinceSlotStart - (time.Duration(roundtimer.QuickTimeoutThreshold) * roundtimer.QuickTimeout) + estimatedRound := roundtimer.QuickTimeoutThreshold + 
specqbft.FirstRound + specqbft.Round(sinceFirstSlowRound/roundtimer.SlowTimeout) + return estimatedRound +} + +func (mv *messageValidator) waitAfterSlotStart(role spectypes.BeaconRole) time.Duration { + switch role { + case spectypes.BNRoleAttester, spectypes.BNRoleSyncCommittee: + return mv.netCfg.Beacon.SlotDurationSec() / 3 + case spectypes.BNRoleAggregator, spectypes.BNRoleSyncCommitteeContribution: + return mv.netCfg.Beacon.SlotDurationSec() / 3 * 2 + case spectypes.BNRoleProposer, spectypes.BNRoleValidatorRegistration: + return 0 + default: + panic("unknown role") + } +} + +func (mv *messageValidator) validRole(roleType spectypes.BeaconRole) bool { + switch roleType { + case spectypes.BNRoleAttester, + spectypes.BNRoleAggregator, + spectypes.BNRoleProposer, + spectypes.BNRoleSyncCommittee, + spectypes.BNRoleSyncCommitteeContribution, + spectypes.BNRoleValidatorRegistration: + return true + } + return false +} + +func (mv *messageValidator) validQBFTMsgType(msgType specqbft.MessageType) bool { + switch msgType { + case specqbft.ProposalMsgType, specqbft.PrepareMsgType, specqbft.CommitMsgType, specqbft.RoundChangeMsgType: + return true + } + return false +} + +func (mv *messageValidator) validConsensusSigners(share *ssvtypes.SSVShare, m *specqbft.SignedMessage) error { + switch { + case len(m.Signers) == 0: + return ErrNoSigners + + case len(m.Signers) == 1: + if m.Message.MsgType == specqbft.ProposalMsgType { + qbftState := &specqbft.State{ + Height: m.Message.Height, + Share: &share.Share, + } + leader := specqbft.RoundRobinProposer(qbftState, m.Message.Round) + if m.Signers[0] != leader { + err := ErrSignerNotLeader + err.got = m.Signers[0] + err.want = leader + return err + } + } + + case m.Message.MsgType != specqbft.CommitMsgType: + e := ErrNonDecidedWithMultipleSigners + e.got = len(m.Signers) + return e + + case !share.HasQuorum(len(m.Signers)) || len(m.Signers) > len(share.Committee): + e := ErrWrongSignersLength + e.want = fmt.Sprintf("between %v and 
%v", share.Quorum, len(share.Committee)) + e.got = len(m.Signers) + return e + } + + if !slices.IsSorted(m.Signers) { + return ErrSignersNotSorted + } + + var prevSigner spectypes.OperatorID + for _, signer := range m.Signers { + if err := mv.commonSignerValidation(signer, share); err != nil { + return err + } + if signer == prevSigner { + return ErrDuplicatedSigner + } + prevSigner = signer + } + return nil +} diff --git a/message/validation/consensus_validation_test.go b/message/validation/consensus_validation_test.go new file mode 100644 index 0000000000..5f0ae02df1 --- /dev/null +++ b/message/validation/consensus_validation_test.go @@ -0,0 +1,104 @@ +package validation + +import ( + "testing" + "time" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" +) + +func TestMessageValidator_currentEstimatedRound(t *testing.T) { + tt := []struct { + name string + sinceSlotStart time.Duration + want specqbft.Round + }{ + { + name: "0s - expected first round", + sinceSlotStart: 0, + want: specqbft.FirstRound, + }, + { + name: "QuickTimeout/2 - expected first round", + sinceSlotStart: roundtimer.QuickTimeout / 2, + want: specqbft.FirstRound, + }, + { + name: "QuickTimeout - expected first+1 round", + sinceSlotStart: roundtimer.QuickTimeout, + want: specqbft.FirstRound + 1, + }, + { + name: "QuickTimeout*2 - expected first+2 round", + sinceSlotStart: roundtimer.QuickTimeout * 2, + want: specqbft.FirstRound + 2, + }, + { + name: "QuickTimeout*3 - expected first+3 round", + sinceSlotStart: roundtimer.QuickTimeout * 3, + want: specqbft.FirstRound + 3, + }, + { + name: "QuickTimeout*4 - expected first+4 round", + sinceSlotStart: roundtimer.QuickTimeout * 4, + want: specqbft.FirstRound + 4, + }, + { + name: "QuickTimeout*5 - expected first+5 round", + sinceSlotStart: roundtimer.QuickTimeout * 5, + want: specqbft.FirstRound + 5, + }, + { + name: "QuickTimeout*6 - expected first+6 round", 
+ sinceSlotStart: roundtimer.QuickTimeout * 6, + want: specqbft.FirstRound + 6, + }, + { + name: "QuickTimeout*7 - expected first+7 round", + sinceSlotStart: roundtimer.QuickTimeout * 7, + want: specqbft.FirstRound + 7, + }, + { + name: "QuickTimeout*8 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * 8, + want: specqbft.FirstRound + 8, + }, + { + name: "QuickTimeout*9 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+1), + want: roundtimer.QuickTimeoutThreshold + 1, + }, + { + name: "QuickTimeout*10 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+2), + want: roundtimer.QuickTimeoutThreshold + 1, + }, + { + name: "(QuickTimeout*8 + SlowTimeout) - expected first+9 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout, + want: roundtimer.QuickTimeoutThreshold + 2, + }, + { + name: "(QuickTimeout*8 + SlowTimeout*2) - expected first+10 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*2, + want: roundtimer.QuickTimeoutThreshold + 3, + }, + { + name: "(QuickTimeout*8 + SlowTimeout*3) - expected first+11 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*3, + want: roundtimer.QuickTimeoutThreshold + 4, + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + mv := &messageValidator{} + got := mv.currentEstimatedRound(tc.sinceSlotStart) + require.Equal(t, tc.want, got) + }) + } +} diff --git a/message/validation/errors.go b/message/validation/errors.go new file mode 100644 index 0000000000..4eeacf4877 --- /dev/null +++ b/message/validation/errors.go @@ -0,0 +1,101 @@ +package validation + +import ( + "fmt" + "strings" +) + +type Error struct { + text string + got any + want 
any + innerErr error + reject bool + silent bool +} + +func (e Error) Error() string { + var sb strings.Builder + sb.WriteString(e.text) + + if e.got != nil { + sb.WriteString(fmt.Sprintf(", got %v", e.got)) + } + if e.want != nil { + sb.WriteString(fmt.Sprintf(", want %v", e.want)) + } + if e.innerErr != nil { + sb.WriteString(fmt.Sprintf(": %s", e.innerErr.Error())) + } + + return sb.String() +} + +func (e Error) Reject() bool { + return e.reject +} + +func (e Error) Silent() bool { + return e.silent +} + +func (e Error) Text() string { + return e.text +} + +var ( + ErrEmptyData = Error{text: "empty data"} + ErrWrongDomain = Error{text: "wrong domain", silent: true} + ErrNoShareMetadata = Error{text: "share has no metadata"} + ErrUnknownValidator = Error{text: "unknown validator"} + ErrValidatorLiquidated = Error{text: "validator is liquidated"} + ErrValidatorNotAttesting = Error{text: "validator is not attesting"} + ErrSlotAlreadyAdvanced = Error{text: "signer has already advanced to a later slot"} + ErrRoundAlreadyAdvanced = Error{text: "signer has already advanced to a later round"} + ErrRoundTooHigh = Error{text: "round is too high for this role" /*, reject: true*/} // TODO: enable reject + ErrEarlyMessage = Error{text: "early message"} + ErrLateMessage = Error{text: "late message"} + ErrTooManySameTypeMessagesPerRound = Error{text: "too many messages of same type per round"} + ErrRSADecryption = Error{text: "rsa decryption", reject: true} + ErrOperatorNotFound = Error{text: "operator not found", reject: true} + ErrPubSubMessageHasNoData = Error{text: "pub-sub message has no data", reject: true} + ErrPubSubDataTooBig = Error{text: "pub-sub message data too big", reject: true} + ErrMalformedPubSubMessage = Error{text: "pub-sub message is malformed", reject: true} + ErrEmptyPubSubMessage = Error{text: "pub-sub message is empty", reject: true} + ErrTopicNotFound = Error{text: "topic not found", reject: true} + ErrSSVDataTooBig = Error{text: "ssv message data too 
big", reject: true} + ErrInvalidRole = Error{text: "invalid role", reject: true} + ErrConsensusValidatorRegistration = Error{text: "consensus message for validator registration role", reject: true} + ErrNoSigners = Error{text: "no signers", reject: true} + ErrWrongSignatureSize = Error{text: "wrong signature size", reject: true} + ErrZeroSignature = Error{text: "zero signature", reject: true} + ErrZeroSigner = Error{text: "zero signer ID", reject: true} + ErrSignerNotInCommittee = Error{text: "signer is not in committee", reject: true} + ErrDuplicatedSigner = Error{text: "signer is duplicated", reject: true} + ErrSignerNotLeader = Error{text: "signer is not leader", reject: true} + ErrSignersNotSorted = Error{text: "signers are not sorted", reject: true} + ErrUnexpectedSigner = Error{text: "signer is not expected", reject: true} + ErrInvalidHash = Error{text: "root doesn't match full data hash", reject: true} + ErrEstimatedRoundTooFar = Error{text: "message round is too far from estimated"} + ErrMalformedMessage = Error{text: "message could not be decoded", reject: true} + ErrMalformedSignedMessage = Error{text: "signed message could not be decoded", reject: true} + ErrUnknownSSVMessageType = Error{text: "unknown SSV message type", reject: true} + ErrUnknownQBFTMessageType = Error{text: "unknown QBFT message type", reject: true} + ErrUnknownPartialMessageType = Error{text: "unknown partial signature message type", reject: true} + ErrPartialSignatureTypeRoleMismatch = Error{text: "partial signature type and role don't match", reject: true} + ErrNonDecidedWithMultipleSigners = Error{text: "non-decided with multiple signers", reject: true} + ErrWrongSignersLength = Error{text: "decided signers size is not between quorum and committee size", reject: true} + ErrDuplicatedProposalWithDifferentData = Error{text: "duplicated proposal with different data", reject: true} + ErrEventMessage = Error{text: "event messages are not broadcast", reject: true} + ErrDKGMessage = 
Error{text: "DKG messages are not supported", reject: true} + ErrMalformedPrepareJustifications = Error{text: "malformed prepare justifications", reject: true} + ErrUnexpectedPrepareJustifications = Error{text: "prepare justifications unexpected for this message type", reject: true} + ErrMalformedRoundChangeJustifications = Error{text: "malformed round change justifications", reject: true} + ErrUnexpectedRoundChangeJustifications = Error{text: "round change justifications unexpected for this message type", reject: true} + ErrInvalidJustifications = Error{text: "invalid justifications", reject: true} + ErrTooManyDutiesPerEpoch = Error{text: "too many duties per epoch", reject: true} + ErrNoDuty = Error{text: "no duty for this epoch", reject: true} + ErrDeserializePublicKey = Error{text: "deserialize public key", reject: true} + ErrNoPartialMessages = Error{text: "no partial messages", reject: true} + ErrDuplicatedPartialSignatureMessage = Error{text: "duplicated partial signature message", reject: true} +) diff --git a/message/validation/message_counts.go b/message/validation/message_counts.go new file mode 100644 index 0000000000..609ed018bc --- /dev/null +++ b/message/validation/message_counts.go @@ -0,0 +1,156 @@ +package validation + +// message_counts.go contains code for counting and validating messages per validator-slot-round. + +import ( + "fmt" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" +) + +// MessageCounts tracks the number of various message types received for validation. +type MessageCounts struct { + PreConsensus int + Proposal int + Prepare int + Commit int + Decided int + RoundChange int + PostConsensus int +} + +// String provides a formatted representation of the MessageCounts. 
+func (c *MessageCounts) String() string { + return fmt.Sprintf("pre-consensus: %v, proposal: %v, prepare: %v, commit: %v, decided: %v, round change: %v, post-consensus: %v", + c.PreConsensus, + c.Proposal, + c.Prepare, + c.Commit, + c.Decided, + c.RoundChange, + c.PostConsensus, + ) +} + +// ValidateConsensusMessage checks if the provided consensus message exceeds the set limits. +// Returns an error if the message type exceeds its respective count limit. +func (c *MessageCounts) ValidateConsensusMessage(msg *specqbft.SignedMessage, limits MessageCounts) error { + switch msg.Message.MsgType { + case specqbft.ProposalMsgType: + if c.Proposal >= limits.Proposal { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("proposal, having %v", c.String()) + return err + } + case specqbft.PrepareMsgType: + if c.Prepare >= limits.Prepare { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("prepare, having %v", c.String()) + return err + } + case specqbft.CommitMsgType: + if len(msg.Signers) == 1 { + if c.Commit >= limits.Commit { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("commit, having %v", c.String()) + return err + } + } + if len(msg.Signers) > 1 { + if c.Decided >= limits.Decided { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("decided, having %v", c.String()) + return err + } + } + case specqbft.RoundChangeMsgType: + if c.RoundChange >= limits.RoundChange { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("round change, having %v", c.String()) + return err + } + default: + panic("unexpected signed message type") // should be checked before + } + + return nil +} + +// ValidatePartialSignatureMessage checks if the provided partial signature message exceeds the set limits. +// Returns an error if the message type exceeds its respective count limit. 
+func (c *MessageCounts) ValidatePartialSignatureMessage(m *spectypes.SignedPartialSignatureMessage, limits MessageCounts) error { + switch m.Message.Type { + case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig: + if c.PreConsensus > limits.PreConsensus { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("pre-consensus, having %v", c.String()) + return err + } + case spectypes.PostConsensusPartialSig: + if c.PostConsensus > limits.PostConsensus { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("post-consensus, having %v", c.String()) + return err + } + default: + panic("unexpected partial signature message type") // should be checked before + } + + return nil +} + +// RecordConsensusMessage updates the counts based on the provided consensus message type. +func (c *MessageCounts) RecordConsensusMessage(msg *specqbft.SignedMessage) { + switch msg.Message.MsgType { + case specqbft.ProposalMsgType: + c.Proposal++ + case specqbft.PrepareMsgType: + c.Prepare++ + case specqbft.CommitMsgType: + switch { + case len(msg.Signers) == 1: + c.Commit++ + case len(msg.Signers) > 1: + c.Decided++ + default: + panic("expected signers") // 0 length should be checked before + } + case specqbft.RoundChangeMsgType: + c.RoundChange++ + default: + panic("unexpected signed message type") // should be checked before + } +} + +// RecordPartialSignatureMessage updates the counts based on the provided partial signature message type. 
+func (c *MessageCounts) RecordPartialSignatureMessage(msg *spectypes.SignedPartialSignatureMessage) { + switch msg.Message.Type { + case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig: + c.PreConsensus++ + case spectypes.PostConsensusPartialSig: + c.PostConsensus++ + default: + panic("unexpected partial signature message type") // should be checked before + } +} + +// maxMessageCounts is the maximum number of acceptable messages from a signer within a slot & round. +func maxMessageCounts(committeeSize int) MessageCounts { + maxDecided := maxDecidedCount(committeeSize) + + return MessageCounts{ + PreConsensus: 1, + Proposal: 1, + Prepare: 1, + Commit: 1, + Decided: maxDecided, + RoundChange: 1, + PostConsensus: 1, + } +} + +func maxDecidedCount(committeeSize int) int { + f := (committeeSize - 1) / 3 + return committeeSize * (f + 1) // N * (f + 1) +} diff --git a/message/validation/metrics.go b/message/validation/metrics.go new file mode 100644 index 0000000000..f023fe0689 --- /dev/null +++ b/message/validation/metrics.go @@ -0,0 +1,38 @@ +package validation + +import ( + "time" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" +) + +type metrics interface { + MessageAccepted(role spectypes.BeaconRole, round specqbft.Round) + MessageIgnored(reason string, role spectypes.BeaconRole, round specqbft.Round) + MessageRejected(reason string, role spectypes.BeaconRole, round specqbft.Round) + SSVMessageType(msgType spectypes.MsgType) + ConsensusMsgType(msgType specqbft.MessageType, signers int) + MessageValidationDuration(duration time.Duration, labels ...string) + SignatureValidationDuration(duration time.Duration, labels ...string) + MessageSize(size int) + ActiveMsgValidation(topic string) + ActiveMsgValidationDone(topic string) + InCommitteeMessage(msgType spectypes.MsgType, decided bool) + NonCommitteeMessage(msgType spectypes.MsgType, 
decided bool) +} + +type nopMetrics struct{} + +func (*nopMetrics) ConsensusMsgType(specqbft.MessageType, int) {} +func (*nopMetrics) MessageAccepted(spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) MessageIgnored(string, spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) MessageRejected(string, spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) SSVMessageType(spectypes.MsgType) {} +func (*nopMetrics) MessageValidationDuration(time.Duration, ...string) {} +func (*nopMetrics) SignatureValidationDuration(time.Duration, ...string) {} +func (*nopMetrics) MessageSize(int) {} +func (*nopMetrics) ActiveMsgValidation(string) {} +func (*nopMetrics) ActiveMsgValidationDone(string) {} +func (*nopMetrics) InCommitteeMessage(spectypes.MsgType, bool) {} +func (*nopMetrics) NonCommitteeMessage(spectypes.MsgType, bool) {} diff --git a/message/validation/partial_validation.go b/message/validation/partial_validation.go new file mode 100644 index 0000000000..0cfcbdac91 --- /dev/null +++ b/message/validation/partial_validation.go @@ -0,0 +1,190 @@ +package validation + +// partial_validation.go contains methods for validating partial signature messages + +import ( + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "golang.org/x/exp/slices" + + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +func (mv *messageValidator) validatePartialSignatureMessage( + share *ssvtypes.SSVShare, + signedMsg *spectypes.SignedPartialSignatureMessage, + msgID spectypes.MessageID, + signatureVerifier func() error, +) (phase0.Slot, error) { + if mv.inCommittee(share) { + mv.metrics.InCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) + } else { + mv.metrics.NonCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) + } + + msgSlot := signedMsg.Message.Slot + + if !mv.validPartialSigMsgType(signedMsg.Message.Type) { + e := ErrUnknownPartialMessageType + 
e.got = signedMsg.Message.Type + return msgSlot, e + } + + role := msgID.GetRoleType() + if !mv.partialSignatureTypeMatchesRole(signedMsg.Message.Type, role) { + return msgSlot, ErrPartialSignatureTypeRoleMismatch + } + + if err := mv.validatePartialMessages(share, signedMsg); err != nil { + return msgSlot, err + } + + state := mv.consensusState(msgID) + signerState := state.GetSignerState(signedMsg.Signer) + if signerState != nil { + if err := mv.validateSignerBehaviorPartial(state, signedMsg.Signer, share, msgID, signedMsg); err != nil { + return msgSlot, err + } + } + + if err := mv.validateSignatureFormat(signedMsg.Signature); err != nil { + return msgSlot, err + } + + if signatureVerifier != nil { + if err := signatureVerifier(); err != nil { + return msgSlot, err + } + } + + if signerState == nil { + signerState = state.CreateSignerState(signedMsg.Signer) + } + + if msgSlot > signerState.Slot { + newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) + signerState.ResetSlot(msgSlot, specqbft.FirstRound, newEpoch) + } + + signerState.MessageCounts.RecordPartialSignatureMessage(signedMsg) + + return msgSlot, nil +} + +func (mv *messageValidator) inCommittee(share *ssvtypes.SSVShare) bool { + return slices.ContainsFunc(share.Committee, func(operator *spectypes.Operator) bool { + return operator.OperatorID == mv.ownOperatorID + }) +} + +func (mv *messageValidator) validPartialSigMsgType(msgType spectypes.PartialSigMsgType) bool { + switch msgType { + case spectypes.PostConsensusPartialSig, + spectypes.RandaoPartialSig, + spectypes.SelectionProofPartialSig, + spectypes.ContributionProofs, + spectypes.ValidatorRegistrationPartialSig: + return true + default: + return false + } +} + +func (mv *messageValidator) partialSignatureTypeMatchesRole(msgType spectypes.PartialSigMsgType, role spectypes.BeaconRole) bool { + switch role { + case spectypes.BNRoleAttester: + return msgType == 
spectypes.PostConsensusPartialSig + case spectypes.BNRoleAggregator: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.SelectionProofPartialSig + case spectypes.BNRoleProposer: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.RandaoPartialSig + case spectypes.BNRoleSyncCommittee: + return msgType == spectypes.PostConsensusPartialSig + case spectypes.BNRoleSyncCommitteeContribution: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.ContributionProofs + case spectypes.BNRoleValidatorRegistration: + return msgType == spectypes.ValidatorRegistrationPartialSig + default: + panic("invalid role") // role validity should be checked before + } +} + +func (mv *messageValidator) validatePartialMessages(share *ssvtypes.SSVShare, m *spectypes.SignedPartialSignatureMessage) error { + if err := mv.commonSignerValidation(m.Signer, share); err != nil { + return err + } + + if len(m.Message.Messages) == 0 { + return ErrNoPartialMessages + } + + seen := map[[32]byte]struct{}{} + for _, message := range m.Message.Messages { + if _, ok := seen[message.SigningRoot]; ok { + return ErrDuplicatedPartialSignatureMessage + } + seen[message.SigningRoot] = struct{}{} + + if message.Signer != m.Signer { + err := ErrUnexpectedSigner + err.want = m.Signer + err.got = message.Signer + return err + } + + if err := mv.commonSignerValidation(message.Signer, share); err != nil { + return err + } + + if err := mv.validateSignatureFormat(message.PartialSignature); err != nil { + return err + } + } + + return nil +} + +func (mv *messageValidator) validateSignerBehaviorPartial( + state *ConsensusState, + signer spectypes.OperatorID, + share *ssvtypes.SSVShare, + msgID spectypes.MessageID, + signedMsg *spectypes.SignedPartialSignatureMessage, +) error { + signerState := state.GetSignerState(signer) + + if signerState == nil { + return nil + } + + msgSlot := signedMsg.Message.Slot + + if msgSlot < signerState.Slot { + // 
Signers aren't allowed to decrease their slot. + // If they've sent a future message due to clock error, + // this should be caught by the earlyMessage check. + err := ErrSlotAlreadyAdvanced + err.want = signerState.Slot + err.got = msgSlot + return err + } + + newDutyInSameEpoch := false + if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) { + newDutyInSameEpoch = true + } + + if err := mv.validateDutyCount(signerState, msgID, newDutyInSameEpoch); err != nil { + return err + } + + if msgSlot <= signerState.Slot { + limits := maxMessageCounts(len(share.Committee)) + if err := signerState.MessageCounts.ValidatePartialSignatureMessage(signedMsg, limits); err != nil { + return err + } + } + + return nil +} diff --git a/message/validation/qbft_config.go b/message/validation/qbft_config.go new file mode 100644 index 0000000000..9750b3d8d3 --- /dev/null +++ b/message/validation/qbft_config.go @@ -0,0 +1,52 @@ +package validation + +import ( + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" +) + +// qbftConfig is used in message validation and has no signature verification. 
+type qbftConfig struct { + domain spectypes.DomainType +} + +func newQBFTConfig(domain spectypes.DomainType) qbftConfig { + return qbftConfig{ + domain: domain, + } +} + +func (q qbftConfig) GetSigner() spectypes.SSVSigner { + panic("should not be called") +} + +func (q qbftConfig) GetSignatureDomainType() spectypes.DomainType { + return q.domain +} + +func (q qbftConfig) GetValueCheckF() specqbft.ProposedValueCheckF { + panic("should not be called") +} + +func (q qbftConfig) GetProposerF() specqbft.ProposerF { + panic("should not be called") +} + +func (q qbftConfig) GetNetwork() specqbft.Network { + panic("should not be called") +} + +func (q qbftConfig) GetStorage() qbftstorage.QBFTStore { + panic("should not be called") +} + +func (q qbftConfig) GetTimer() roundtimer.Timer { + panic("should not be called") +} + +func (q qbftConfig) VerifySignatures() bool { + return false +} diff --git a/message/validation/rsa.go b/message/validation/rsa.go new file mode 100644 index 0000000000..94071ae0f6 --- /dev/null +++ b/message/validation/rsa.go @@ -0,0 +1,57 @@ +package validation + +import ( + "crypto" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "fmt" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/utils/rsaencryption" +) + +func (mv *messageValidator) verifyRSASignature(messageData []byte, operatorID spectypes.OperatorID, signature []byte) error { + rsaPubKey, ok := mv.operatorIDToPubkeyCache.Get(operatorID) + if !ok { + operator, found, err := mv.nodeStorage.GetOperatorData(nil, operatorID) + if err != nil { + e := ErrOperatorNotFound + e.got = operatorID + e.innerErr = err + return e + } + if !found { + e := ErrOperatorNotFound + e.got = operatorID + return e + } + + operatorPubKey, err := base64.StdEncoding.DecodeString(string(operator.PublicKey)) + if err != nil { + e := ErrRSADecryption + e.innerErr = fmt.Errorf("decode public key: %w", err) + return e + } + + rsaPubKey, err = 
rsaencryption.ConvertPemToPublicKey(operatorPubKey) + if err != nil { + e := ErrRSADecryption + e.innerErr = fmt.Errorf("convert PEM: %w", err) + return e + } + + mv.operatorIDToPubkeyCache.Set(operatorID, rsaPubKey) + } + + messageHash := sha256.Sum256(messageData) + + if err := rsa.VerifyPKCS1v15(rsaPubKey, crypto.SHA256, messageHash[:], signature); err != nil { + e := ErrRSADecryption + e.innerErr = fmt.Errorf("verify opid: %v signature: %w", operatorID, err) + return e + } + + return nil +} diff --git a/message/validation/signer_state.go b/message/validation/signer_state.go new file mode 100644 index 0000000000..dc9bf1818e --- /dev/null +++ b/message/validation/signer_state.go @@ -0,0 +1,45 @@ +package validation + +// signer_state.go describes state of a signer. + +import ( + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" +) + +// SignerState represents the state of a signer, including its start time, slot, round, +// message counts, proposal data, and the number of duties performed in the current epoch. +type SignerState struct { + Start time.Time + Slot phase0.Slot + Round specqbft.Round + MessageCounts MessageCounts + ProposalData []byte + EpochDuties int +} + +// ResetSlot resets the state's slot, round, message counts, and proposal data to the given values. +// It also updates the start time to the current time and increments the epoch duties count if it's a new epoch. +func (s *SignerState) ResetSlot(slot phase0.Slot, round specqbft.Round, newEpoch bool) { + s.Start = time.Now() + s.Slot = slot + s.Round = round + s.MessageCounts = MessageCounts{} + s.ProposalData = nil + if newEpoch { + s.EpochDuties = 1 + } else { + s.EpochDuties++ + } +} + +// ResetRound resets the state's round, message counts, and proposal data to the given values. +// It also updates the start time to the current time. 
+func (s *SignerState) ResetRound(round specqbft.Round) { + s.Start = time.Now() + s.Round = round + s.MessageCounts = MessageCounts{} + s.ProposalData = nil +} diff --git a/message/validation/validation.go b/message/validation/validation.go new file mode 100644 index 0000000000..c00b2b8ea3 --- /dev/null +++ b/message/validation/validation.go @@ -0,0 +1,581 @@ +// Package validation provides functions and structures for validating messages. +package validation + +// validator.go contains main code for validation and most of the rule checks. + +import ( + "bytes" + "context" + "crypto/rsa" + "encoding/hex" + "fmt" + "strings" + "sync" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/cornelk/hashmap" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/exp/slices" + + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" + operatorstorage "github.com/bloxapp/ssv/operator/storage" + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +const ( + // lateMessageMargin is the duration past a message's TTL in which it is still considered valid. + lateMessageMargin = time.Second * 3 + + // clockErrorTolerance is the maximum amount of clock error we expect to see between nodes. 
+ clockErrorTolerance = time.Millisecond * 50 + + maxMessageSize = maxConsensusMsgSize + maxConsensusMsgSize = 8388608 + maxPartialSignatureMsgSize = 1952 + allowedRoundsInFuture = 1 + allowedRoundsInPast = 2 + lateSlotAllowance = 2 + signatureSize = 96 + maxDutiesPerEpoch = 2 +) + +// PubsubMessageValidator defines methods for validating pubsub messages. +type PubsubMessageValidator interface { + ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult + ValidatePubsubMessage(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult +} + +// SSVMessageValidator defines methods for validating SSV messages. +type SSVMessageValidator interface { + ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error) +} + +// MessageValidator is an interface that combines both PubsubMessageValidator and SSVMessageValidator. +type MessageValidator interface { + PubsubMessageValidator + SSVMessageValidator +} + +type messageValidator struct { + logger *zap.Logger + metrics metrics + netCfg networkconfig.NetworkConfig + index sync.Map + nodeStorage operatorstorage.Storage + dutyStore *dutystore.Store + ownOperatorID spectypes.OperatorID + operatorIDToPubkeyCache *hashmap.Map[spectypes.OperatorID, *rsa.PublicKey] + + // validationLocks is a map of lock per SSV message ID to + // prevent concurrent access to the same state. + validationLocks map[spectypes.MessageID]*sync.Mutex + validationMutex sync.Mutex + + selfPID peer.ID + selfAccept bool +} + +// NewMessageValidator returns a new MessageValidator with the given network configuration and options. 
+func NewMessageValidator(netCfg networkconfig.NetworkConfig, opts ...Option) MessageValidator { + mv := &messageValidator{ + logger: zap.NewNop(), + metrics: &nopMetrics{}, + netCfg: netCfg, + operatorIDToPubkeyCache: hashmap.New[spectypes.OperatorID, *rsa.PublicKey](), + validationLocks: make(map[spectypes.MessageID]*sync.Mutex), + } + + for _, opt := range opts { + opt(mv) + } + + return mv +} + +// Option represents a functional option for configuring a messageValidator. +type Option func(validator *messageValidator) + +// WithLogger sets the logger for the messageValidator. +func WithLogger(logger *zap.Logger) Option { + return func(mv *messageValidator) { + mv.logger = logger + } +} + +// WithMetrics sets the metrics for the messageValidator. +func WithMetrics(metrics metrics) Option { + return func(mv *messageValidator) { + mv.metrics = metrics + } +} + +// WithDutyStore sets the duty store for the messageValidator. +func WithDutyStore(dutyStore *dutystore.Store) Option { + return func(mv *messageValidator) { + mv.dutyStore = dutyStore + } +} + +// WithOwnOperatorID sets the operator ID for the messageValidator. +func WithOwnOperatorID(id spectypes.OperatorID) Option { + return func(mv *messageValidator) { + mv.ownOperatorID = id + } +} + +// WithNodeStorage sets the node storage for the messageValidator. +func WithNodeStorage(nodeStorage operatorstorage.Storage) Option { + return func(mv *messageValidator) { + mv.nodeStorage = nodeStorage + } +} + +// WithSelfAccept blindly accepts messages sent from self. Useful for testing. +func WithSelfAccept(selfPID peer.ID, selfAccept bool) Option { + return func(mv *messageValidator) { + mv.selfPID = selfPID + mv.selfAccept = selfAccept + } +} + +// ConsensusDescriptor provides details about the consensus for a message. It's used for logging and metrics. 
+type ConsensusDescriptor struct { + Round specqbft.Round + QBFTMessageType specqbft.MessageType + Signers []spectypes.OperatorID + Committee []*spectypes.Operator +} + +// Descriptor provides details about a message. It's used for logging and metrics. +type Descriptor struct { + ValidatorPK spectypes.ValidatorPK + Role spectypes.BeaconRole + SSVMessageType spectypes.MsgType + Slot phase0.Slot + Consensus *ConsensusDescriptor +} + +// Fields returns zap logging fields for the descriptor. +func (d Descriptor) Fields() []zapcore.Field { + result := []zapcore.Field{ + fields.Validator(d.ValidatorPK), + fields.Role(d.Role), + zap.String("ssv_message_type", ssvmessage.MsgTypeToString(d.SSVMessageType)), + fields.Slot(d.Slot), + } + + if d.Consensus != nil { + var committee []spectypes.OperatorID + for _, o := range d.Consensus.Committee { + committee = append(committee, o.OperatorID) + } + + result = append(result, + fields.Round(d.Consensus.Round), + zap.String("qbft_message_type", ssvmessage.QBFTMsgTypeToString(d.Consensus.QBFTMessageType)), + zap.Uint64s("signers", d.Consensus.Signers), + zap.Uint64s("committee", committee), + ) + } + + return result +} + +// String provides a string representation of the descriptor. It may be useful for logging. 
+func (d Descriptor) String() string { + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("validator PK: %v, role: %v, ssv message type: %v, slot: %v", + hex.EncodeToString(d.ValidatorPK), + d.Role.String(), + ssvmessage.MsgTypeToString(d.SSVMessageType), + d.Slot, + )) + + if d.Consensus != nil { + var committee []spectypes.OperatorID + for _, o := range d.Consensus.Committee { + committee = append(committee, o.OperatorID) + } + + sb.WriteString(fmt.Sprintf(", round: %v, qbft message type: %v, signers: %v, committee: %v", + d.Consensus.Round, + ssvmessage.QBFTMsgTypeToString(d.Consensus.QBFTMessageType), + d.Consensus.Signers, + committee, + )) + } + + return sb.String() +} + +// ValidatorForTopic returns a validation function for the given topic. +// This function can be used to validate messages within the libp2p pubsub framework. +func (mv *messageValidator) ValidatorForTopic(_ string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + return mv.ValidatePubsubMessage +} + +// ValidatePubsubMessage validates the given pubsub message. +// Depending on the outcome, it will return one of the pubsub validation results (Accept, Ignore, or Reject). +func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, peerID peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + if mv.selfAccept && peerID == mv.selfPID { + msg, _ := commons.DecodeNetworkMsg(pmsg.Data) + decMsg, _ := queue.DecodeSSVMessage(msg) + pmsg.ValidatorData = decMsg + return pubsub.ValidationAccept + } + + start := time.Now() + var validationDurationLabels []string // TODO: implement + + defer func() { + sinceStart := time.Since(start) + mv.metrics.MessageValidationDuration(sinceStart, validationDurationLabels...) 
+ }() + + decodedMessage, descriptor, err := mv.validateP2PMessage(pmsg, time.Now()) + round := specqbft.Round(0) + if descriptor.Consensus != nil { + round = descriptor.Consensus.Round + } + + f := append(descriptor.Fields(), fields.PeerID(peerID)) + + if err != nil { + var valErr Error + if errors.As(err, &valErr) { + if valErr.Reject() { + if !valErr.Silent() { + f = append(f, zap.Error(err)) + mv.logger.Debug("rejecting invalid message", f...) + } + + mv.metrics.MessageRejected(valErr.Text(), descriptor.Role, round) + return pubsub.ValidationReject + } + + if !valErr.Silent() { + f = append(f, zap.Error(err)) + mv.logger.Debug("ignoring invalid message", f...) + } + mv.metrics.MessageIgnored(valErr.Text(), descriptor.Role, round) + return pubsub.ValidationIgnore + } + + mv.metrics.MessageIgnored(err.Error(), descriptor.Role, round) + f = append(f, zap.Error(err)) + mv.logger.Debug("ignoring invalid message", f...) + return pubsub.ValidationIgnore + } + + pmsg.ValidatorData = decodedMessage + + mv.metrics.MessageAccepted(descriptor.Role, round) + + return pubsub.ValidationAccept +} + +// ValidateSSVMessage validates the given SSV message. +// If successful, it returns the decoded message and its descriptor. Otherwise, it returns an error. 
+func (mv *messageValidator) ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error) { + return mv.validateSSVMessage(ssvMessage, time.Now(), nil) +} + +func (mv *messageValidator) validateP2PMessage(pMsg *pubsub.Message, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { + topic := pMsg.GetTopic() + + mv.metrics.ActiveMsgValidation(topic) + defer mv.metrics.ActiveMsgValidationDone(topic) + + messageData := pMsg.GetData() + + var signatureVerifier func() error + + currentEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())) + if currentEpoch > mv.netCfg.PermissionlessActivationEpoch { + decMessageData, operatorID, signature, err := commons.DecodeSignedSSVMessage(messageData) + messageData = decMessageData + if err != nil { + e := ErrMalformedSignedMessage + e.innerErr = err + return nil, Descriptor{}, e + } + + signatureVerifier = func() error { + return mv.verifyRSASignature(messageData, operatorID, signature) + } + } + + if len(messageData) == 0 { + return nil, Descriptor{}, ErrPubSubMessageHasNoData + } + + mv.metrics.MessageSize(len(messageData)) + + // Max possible MsgType + MsgID + Data plus 10% for encoding overhead + const maxMsgSize = 4 + 56 + 8388668 + const maxEncodedMsgSize = maxMsgSize + maxMsgSize/10 + if len(messageData) > maxEncodedMsgSize { + e := ErrPubSubDataTooBig + e.got = len(messageData) + return nil, Descriptor{}, e + } + + msg, err := commons.DecodeNetworkMsg(messageData) + if err != nil { + e := ErrMalformedPubSubMessage + e.innerErr = err + return nil, Descriptor{}, e + } + + if msg == nil { + return nil, Descriptor{}, ErrEmptyPubSubMessage + } + + // Check if the message was sent on the right topic. 
+ currentTopic := pMsg.GetTopic() + currentTopicBaseName := commons.GetTopicBaseName(currentTopic) + topics := commons.ValidatorTopicID(msg.GetID().GetPubKey()) + + topicFound := false + for _, tp := range topics { + if tp == currentTopicBaseName { + topicFound = true + break + } + } + if !topicFound { + return nil, Descriptor{}, ErrTopicNotFound + } + + mv.metrics.SSVMessageType(msg.MsgType) + + return mv.validateSSVMessage(msg, receivedAt, signatureVerifier) +} + +func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, receivedAt time.Time, signatureVerifier func() error) (*queue.DecodedSSVMessage, Descriptor, error) { + var descriptor Descriptor + + if len(ssvMessage.Data) == 0 { + return nil, descriptor, ErrEmptyData + } + + if len(ssvMessage.Data) > maxMessageSize { + err := ErrSSVDataTooBig + err.got = len(ssvMessage.Data) + err.want = maxMessageSize + return nil, descriptor, err + } + + if !bytes.Equal(ssvMessage.MsgID.GetDomain(), mv.netCfg.Domain[:]) { + err := ErrWrongDomain + err.got = hex.EncodeToString(ssvMessage.MsgID.GetDomain()) + err.want = hex.EncodeToString(mv.netCfg.Domain[:]) + return nil, descriptor, err + } + + validatorPK := ssvMessage.GetID().GetPubKey() + role := ssvMessage.GetID().GetRoleType() + descriptor.Role = role + descriptor.ValidatorPK = validatorPK + + if !mv.validRole(role) { + return nil, descriptor, ErrInvalidRole + } + + publicKey, err := ssvtypes.DeserializeBLSPublicKey(validatorPK) + if err != nil { + e := ErrDeserializePublicKey + e.innerErr = err + return nil, descriptor, e + } + + var share *ssvtypes.SSVShare + if mv.nodeStorage != nil { + share = mv.nodeStorage.Shares().Get(nil, publicKey.Serialize()) + if share == nil { + e := ErrUnknownValidator + e.got = publicKey.SerializeToHexStr() + return nil, descriptor, e + } + + if share.Liquidated { + return nil, descriptor, ErrValidatorLiquidated + } + + if share.BeaconMetadata == nil { + return nil, descriptor, ErrNoShareMetadata + } + + if 
!share.BeaconMetadata.IsAttesting() { + err := ErrValidatorNotAttesting + err.got = share.BeaconMetadata.Status.String() + return nil, descriptor, err + } + } + + msg, err := queue.DecodeSSVMessage(ssvMessage) + if err != nil { + if errors.Is(err, queue.ErrUnknownMessageType) { + e := ErrUnknownSSVMessageType + e.got = ssvMessage.GetType() + return nil, descriptor, e + } + + e := ErrMalformedMessage + e.innerErr = err + return nil, descriptor, e + } + + // Lock this SSV message ID to prevent concurrent access to the same state. + mv.validationMutex.Lock() + mutex, ok := mv.validationLocks[msg.GetID()] + if !ok { + mutex = &sync.Mutex{} + mv.validationLocks[msg.GetID()] = mutex + } + mutex.Lock() + defer mutex.Unlock() + mv.validationMutex.Unlock() + + descriptor.SSVMessageType = ssvMessage.MsgType + + if mv.nodeStorage != nil { + switch ssvMessage.MsgType { + case spectypes.SSVConsensusMsgType: + if len(msg.Data) > maxConsensusMsgSize { + e := ErrSSVDataTooBig + e.got = len(ssvMessage.Data) + e.want = maxConsensusMsgSize + return nil, descriptor, e + } + + signedMessage := msg.Body.(*specqbft.SignedMessage) + consensusDescriptor, slot, err := mv.validateConsensusMessage(share, signedMessage, msg.GetID(), receivedAt, signatureVerifier) + descriptor.Consensus = &consensusDescriptor + descriptor.Slot = slot + if err != nil { + return nil, descriptor, err + } + + case spectypes.SSVPartialSignatureMsgType: + if len(msg.Data) > maxPartialSignatureMsgSize { + e := ErrSSVDataTooBig + e.got = len(ssvMessage.Data) + e.want = maxPartialSignatureMsgSize + return nil, descriptor, e + } + + partialSignatureMessage := msg.Body.(*spectypes.SignedPartialSignatureMessage) + slot, err := mv.validatePartialSignatureMessage(share, partialSignatureMessage, msg.GetID(), signatureVerifier) + descriptor.Slot = slot + if err != nil { + return nil, descriptor, err + } + + case ssvmessage.SSVEventMsgType: + return nil, descriptor, ErrEventMessage + + case spectypes.DKGMsgType: + return nil, 
descriptor, ErrDKGMessage + } + } + + return msg, descriptor, nil +} + +func (mv *messageValidator) containsSignerFunc(signer spectypes.OperatorID) func(operator *spectypes.Operator) bool { + return func(operator *spectypes.Operator) bool { + return operator.OperatorID == signer + } +} + +func (mv *messageValidator) validateSignatureFormat(signature []byte) error { + if len(signature) != signatureSize { + e := ErrWrongSignatureSize + e.got = len(signature) + return e + } + + if [signatureSize]byte(signature) == [signatureSize]byte{} { + return ErrZeroSignature + } + return nil +} + +func (mv *messageValidator) commonSignerValidation(signer spectypes.OperatorID, share *ssvtypes.SSVShare) error { + if signer == 0 { + return ErrZeroSigner + } + + if !slices.ContainsFunc(share.Committee, mv.containsSignerFunc(signer)) { + return ErrSignerNotInCommittee + } + + return nil +} + +func (mv *messageValidator) validateSlotTime(messageSlot phase0.Slot, role spectypes.BeaconRole, receivedAt time.Time) error { + if mv.earlyMessage(messageSlot, receivedAt) { + return ErrEarlyMessage + } + + if lateness := mv.lateMessage(messageSlot, role, receivedAt); lateness > 0 { + e := ErrLateMessage + e.got = fmt.Sprintf("late by %v", lateness) + return e + } + + return nil +} + +func (mv *messageValidator) earlyMessage(slot phase0.Slot, receivedAt time.Time) bool { + return mv.netCfg.Beacon.GetSlotEndTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())). 
+ Add(-clockErrorTolerance).Before(mv.netCfg.Beacon.GetSlotStartTime(slot)) +} + +func (mv *messageValidator) lateMessage(slot phase0.Slot, role spectypes.BeaconRole, receivedAt time.Time) time.Duration { + var ttl phase0.Slot + switch role { + case spectypes.BNRoleProposer, spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + ttl = 1 + lateSlotAllowance + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator: + ttl = 32 + lateSlotAllowance + case spectypes.BNRoleValidatorRegistration: + return 0 + } + + deadline := mv.netCfg.Beacon.GetSlotStartTime(slot + ttl). + Add(lateMessageMargin).Add(clockErrorTolerance) + + return mv.netCfg.Beacon.GetSlotStartTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())). + Sub(deadline) +} + +func (mv *messageValidator) consensusState(messageID spectypes.MessageID) *ConsensusState { + id := ConsensusID{ + PubKey: phase0.BLSPubKey(messageID.GetPubKey()), + Role: messageID.GetRoleType(), + } + + if _, ok := mv.index.Load(id); !ok { + cs := &ConsensusState{ + Signers: hashmap.New[spectypes.OperatorID, *SignerState](), + } + mv.index.Store(id, cs) + } + + cs, _ := mv.index.Load(id) + return cs.(*ConsensusState) +} diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go new file mode 100644 index 0000000000..78c759556a --- /dev/null +++ b/message/validation/validation_test.go @@ -0,0 +1,2030 @@ +package validation + +import ( + "bytes" + "crypto" + crand "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/hex" + "math" + "testing" + "time" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/ethereum/go-ethereum/common" + "github.com/herumi/bls-eth-go-binary/bls" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pspb 
"github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/stretchr/testify/require" + eth2types "github.com/wealdtech/go-eth2-types/v2" + "go.uber.org/zap/zaptest" + + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/storage" + beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + registrystorage "github.com/bloxapp/ssv/registry/storage" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils/rsaencryption" +) + +func Test_ValidateSSVMessage(t *testing.T) { + logger := zaptest.NewLogger(t) + db, err := kv.NewInMemory(logger, basedb.Options{}) + require.NoError(t, err) + + ns, err := storage.NewNodeStorage(logger, db) + require.NoError(t, err) + + const validatorIndex = 123 + + ks := spectestingutils.Testing4SharesSet() + share := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + Index: validatorIndex, + }, + Liquidated: false, + }, + } + require.NoError(t, ns.Shares().Save(nil, share)) + + netCfg := networkconfig.TestNetwork + + roleAttester := spectypes.BNRoleAttester + + // Message validation happy flow, messages are not ignored or rejected and there are no errors + t.Run("happy flow", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: 
spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.NoError(t, err) + }) + + // Make sure messages are incremented and throw an ignore message if more than 1 for a commit + t.Run("message counts", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + state := validator.consensusState(msgID) + for i := spectypes.OperatorID(1); i <= 4; i++ { + signerState := state.GetSignerState(i) + require.Nil(t, signerState) + } + + signedMsg := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedMsg, err := signedMsg.Encode() + require.NoError(t, err) + + ssvMsg := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMsg, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt, nil) + require.NoError(t, err) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt, nil) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + state1 := state.GetSignerState(1) + require.NotNil(t, state1) + require.EqualValues(t, height, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Proposal: 1}, state1.MessageCounts) + for i := spectypes.OperatorID(2); i <= 4; i++ { + signerState := state.GetSignerState(i) + require.Nil(t, signerState) + } + + signedMsg = spectestingutils.TestingPrepareMessageWithParams(ks.Shares[1], 1, 2, height, 
spectestingutils.TestingIdentifier, spectestingutils.TestingQBFTRootData) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt, nil) + require.NoError(t, err) + + require.NotNil(t, state1) + require.EqualValues(t, height, state1.Slot) + require.EqualValues(t, 2, state1.Round) + require.EqualValues(t, MessageCounts{Prepare: 1}, state1.MessageCounts) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt, nil) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + signedMsg = spectestingutils.TestingCommitMessageWithHeight(ks.Shares[1], 1, height+1) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec()), nil) + require.NoError(t, err) + require.NotNil(t, state1) + require.EqualValues(t, height+1, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Commit: 1}, state1.MessageCounts) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec()), nil) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + signedMsg = spectestingutils.TestingCommitMultiSignerMessageWithHeight([]*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, height+1) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec()), nil) + require.NoError(t, err) + require.NotNil(t, state1) + require.EqualValues(t, height+1, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Commit: 1, Decided: 1}, state1.MessageCounts) + }) + + // Send a pubsub message with no data should cause an error + t.Run("pubsub message has no 
data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + pmsg := &pubsub.Message{} + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err := validator.validateP2PMessage(pmsg, receivedAt) + + require.ErrorIs(t, err, ErrPubSubMessageHasNoData) + }) + + // Send a pubsub message where there is too much data should cause an error + t.Run("pubsub data too big", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + topic := commons.GetTopicFullName(commons.ValidatorTopicID(share.ValidatorPubKey)[0]) + pmsg := &pubsub.Message{ + Message: &pspb.Message{ + Data: bytes.Repeat([]byte{1}, 10_000_000), + Topic: &topic, + From: []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r"), + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) + + e := ErrPubSubDataTooBig + e.got = 10_000_000 + require.ErrorIs(t, err, e) + }) + + // Send a malformed pubsub message (empty message) should return an error + t.Run("empty pubsub message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + topic := commons.GetTopicFullName(commons.ValidatorTopicID(share.ValidatorPubKey)[0]) + pmsg := &pubsub.Message{ + Message: &pspb.Message{ + Data: []byte{1}, + Topic: &topic, + From: []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r"), + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) + + require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) + }) + + // Send a message with 
incorrect data (unable to decode incorrect message type) + t.Run("bad data format", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{1}, 500), + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Send a message with no data should return an error + t.Run("no data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: []byte{}, + } + + _, _, err := validator.validateSSVMessage(message, time.Now(), nil) + require.ErrorIs(t, err, ErrEmptyData) + + message = &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: nil, + } + + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) + require.ErrorIs(t, err, ErrEmptyData) + }) + + // Send a message where there is too much data should cause an error + t.Run("data too big", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + const tooBigMsgSize = maxMessageSize * 2 + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{0x1}, tooBigMsgSize), + } + + _, _, err := validator.validateSSVMessage(message, time.Now(), nil) + expectedErr := 
ErrSSVDataTooBig + expectedErr.got = tooBigMsgSize + expectedErr.want = maxMessageSize + require.ErrorIs(t, err, expectedErr) + }) + + // Send exact allowed data size amount but with invalid data (fails to decode) + t.Run("data size borderline / malformed message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{0x1}, maxMessageSize), + } + + _, _, err := validator.validateSSVMessage(message, time.Now(), nil) + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Send an invalid SSV message type returns an error + t.Run("invalid SSV message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: math.MaxUint64, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: []byte{0x1}, + } + + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) + require.ErrorContains(t, err, ErrUnknownSSVMessageType.Error()) + }) + + // Empty validator public key returns an error + t.Run("empty validator public key", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, spectypes.ValidatorPK{}, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) + require.ErrorContains(t, err, ErrDeserializePublicKey.Error()) + }) + + // Generate random validator and validate it is 
unknown to the network + t.Run("unknown validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + sk, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, sk.PublicKey().Marshal(), roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) + expectedErr := ErrUnknownValidator + expectedErr.got = hex.EncodeToString(sk.PublicKey().Marshal()) + require.ErrorIs(t, err, expectedErr) + }) + + // Make sure messages are dropped if on the incorrect network + t.Run("wrong domain", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + wrongDomain := spectypes.DomainType{math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8} + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(wrongDomain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + expectedErr := ErrWrongDomain + expectedErr.got = hex.EncodeToString(wrongDomain[:]) + expectedErr.want = hex.EncodeToString(netCfg.Domain[:]) + require.ErrorIs(t, err, expectedErr) + }) + + // Send message with a value that refers to a 
non-existent role + t.Run("invalid role", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, math.MaxUint64), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrInvalidRole) + }) + + // Perform validator registration with a consensus type message will give an error + t.Run("consensus validator registration", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, spectypes.BNRoleValidatorRegistration), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrConsensusValidatorRegistration) + }) + + // Ignore messages related to a validator that is liquidated + t.Run("liquidated validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, 
WithNodeStorage(ns)).(*messageValidator) + + liquidatedSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + liquidatedShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + }, + Liquidated: true, + }, + } + liquidatedShare.ValidatorPubKey = liquidatedSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, liquidatedShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, liquidatedShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now(), nil) + expectedErr := ErrValidatorLiquidated + require.ErrorIs(t, err, expectedErr) + + require.NoError(t, ns.Shares().Delete(nil, liquidatedShare.ValidatorPubKey)) + }) + + // Ignore messages related to a validator that is not active + t.Run("inactive validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + inactiveSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + inactiveShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateUnknown, + }, + Liquidated: false, + }, + } + inactiveShare.ValidatorPubKey = inactiveSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, inactiveShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := 
&spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, inactiveShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + expectedErr := ErrValidatorNotAttesting + expectedErr.got = eth2apiv1.ValidatorStateUnknown.String() + require.ErrorIs(t, err, expectedErr) + + require.NoError(t, ns.Shares().Delete(nil, inactiveShare.ValidatorPubKey)) + }) + + // Unable to process a message with a validator that is not on the network + t.Run("no share metadata", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + noMetadataSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + noMetadataShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: nil, + Liquidated: false, + }, + } + noMetadataShare.ValidatorPubKey = noMetadataSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, noMetadataShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, noMetadataShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrNoShareMetadata) + + require.NoError(t, ns.Shares().Delete(nil, noMetadataShare.ValidatorPubKey)) + }) + + // Receive error if 
more than 2 attestation duties in an epoch + t.Run("too many duties", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) + require.NoError(t, err) + + validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+4) + encodedValidSignedMessage, err = validSignedMessage.Encode() + require.NoError(t, err) + + message.Data = encodedValidSignedMessage + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+4).Add(validator.waitAfterSlotStart(roleAttester)), nil) + require.NoError(t, err) + + validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+8) + encodedValidSignedMessage, err = validSignedMessage.Encode() + require.NoError(t, err) + + message.Data = encodedValidSignedMessage + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+8).Add(validator.waitAfterSlotStart(roleAttester)), nil) + require.ErrorContains(t, err, ErrTooManyDutiesPerEpoch.Error()) + }) + + // Throw error if getting a message for proposal and see there is no message from beacon + t.Run("no proposal duties", func(t *testing.T) { + const epoch = 1 + slot := netCfg.Beacon.FirstSlotAtEpoch(epoch) + height := specqbft.Height(slot) + + dutyStore := dutystore.New() + dutyStore.Proposer.Add(epoch, slot, 
validatorIndex+1, ð2apiv1.ProposerDuty{}, true) + validator := NewMessageValidator(netCfg, WithNodeStorage(ns), WithDutyStore(dutyStore)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, spectypes.BNRoleProposer), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), nil) + require.ErrorContains(t, err, ErrNoDuty.Error()) + + dutyStore = dutystore.New() + dutyStore.Proposer.Add(epoch, slot, validatorIndex, ð2apiv1.ProposerDuty{}, true) + validator = NewMessageValidator(netCfg, WithNodeStorage(ns), WithDutyStore(dutyStore)).(*messageValidator) + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), nil) + require.NoError(t, err) + }) + + // Get error when receiving a message with over 13 partial signatures + t.Run("partial message too big", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + for i := 0; i < 13; i++ { + msg.Message.Messages = append(msg.Message.Messages, msg.Message.Messages[0]) + } + + _, err := msg.Encode() + require.ErrorContains(t, err, "max expected 13 and 14 found") + }) + + // Get error when receiving message from operator who is not affiliated with the validator + t.Run("signer ID not in committee", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := 
spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 5, specqbft.Height(slot)) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrSignerNotInCommittee) + }) + + // Get error when receiving message from operator who is non-existent (operator id 0) + t.Run("partial zero signer ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 0, specqbft.Height(slot)) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + // Get error when receiving partial signature message from operator who is the incorrect signer + t.Run("partial inconsistent signer ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages[0].Signer = 2 + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: 
spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + expectedErr := ErrUnexpectedSigner + expectedErr.got = spectypes.OperatorID(2) + expectedErr.want = spectypes.OperatorID(1) + require.ErrorIs(t, err, expectedErr) + }) + + // Receive error when receiving a duplicated partial signature message + t.Run("partial duplicated message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages = append(msg.Message.Messages, msg.Message.Messages[0]) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrDuplicatedPartialSignatureMessage) + }) + + // Receive error when "partialSignatureMessages" does not contain any "partialSignatureMessage" + t.Run("no partial signature messages", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages = []*spectypes.PartialSignatureMessage{} + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: 
spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrNoPartialMessages) + }) + + // Receive error when the partial signature message is not enough bytes + t.Run("partial wrong signature size", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Signature = []byte{1} + + encoded, err := msg.Encode() + require.ErrorContains(t, err, "bytes array does not have the correct length") + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Run partial message type validation tests + t.Run("partial message type validation", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(162304) + + // Check happy flow of a duty for each role + t.Run("valid", func(t *testing.T) { + tests := map[spectypes.BeaconRole][]spectypes.PartialSigMsgType{ + spectypes.BNRoleAttester: {spectypes.PostConsensusPartialSig}, + spectypes.BNRoleAggregator: {spectypes.PostConsensusPartialSig, spectypes.SelectionProofPartialSig}, + spectypes.BNRoleProposer: {spectypes.PostConsensusPartialSig, spectypes.RandaoPartialSig}, + spectypes.BNRoleSyncCommittee: {spectypes.PostConsensusPartialSig}, + spectypes.BNRoleSyncCommitteeContribution: {spectypes.PostConsensusPartialSig, 
spectypes.ContributionProofs}, + spectypes.BNRoleValidatorRegistration: {spectypes.ValidatorRegistrationPartialSig}, + } + + for role, msgTypes := range tests { + for _, msgType := range msgTypes { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + innerSig, r, err := spectestingutils.NewTestingKeyManager().SignBeaconObject(spectypes.SSZUint64(spectestingutils.TestingDutyEpoch), phase0.Domain{}, ks.Shares[1].GetPublicKey().Serialize(), phase0.DomainType{}) + require.NoError(t, err) + + innerMsg := spectypes.PartialSignatureMessages{ + Type: msgType, + Messages: []*spectypes.PartialSignatureMessage{ + { + PartialSignature: innerSig, + SigningRoot: r, + Signer: 1, + }, + }, + } + + sig, err := spectestingutils.NewTestingKeyManager().SignRoot(innerMsg, spectypes.PartialSignatureType, ks.Shares[1].GetPublicKey().Serialize()) + require.NoError(t, err) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: innerMsg, + Signature: sig, + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.NoError(t, err) + } + } + }) + + // Get error when receiving a message with an incorrect message type + t.Run("invalid message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: spectypes.PartialSignatureMessages{ + Type: math.MaxUint64, + }, + Signature: make([]byte, 96), + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + 
MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorContains(t, err, ErrUnknownPartialMessageType.Error()) + }) + + // Get error when sending an unexpected message type for the required duty (sending randao for attestor duty) + t.Run("mismatch", func(t *testing.T) { + tests := map[spectypes.BeaconRole][]spectypes.PartialSigMsgType{ + spectypes.BNRoleAttester: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleAggregator: {spectypes.RandaoPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleProposer: {spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleSyncCommittee: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleSyncCommitteeContribution: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleValidatorRegistration: {spectypes.PostConsensusPartialSig, spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs}, + } + + for role, msgTypes := range tests { + for _, msgType := range msgTypes { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: spectypes.PartialSignatureMessages{ + Type: msgType, + }, + Signature: make([]byte, 96), + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + 
MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorContains(t, err, ErrPartialSignatureTypeRoleMismatch.Error()) + } + } + }) + }) + + // Get error when receiving QBFT message with an invalid type + t.Run("invalid QBFT message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + msg := &specqbft.Message{ + MsgType: math.MaxUint64, + Height: height, + Round: specqbft.FirstRound, + Identifier: spectestingutils.TestingIdentifier, + Root: spectestingutils.TestingQBFTRootData, + } + signedMsg := spectestingutils.SignQBFTMsg(ks.Shares[1], 1, msg) + + encodedValidSignedMessage, err := signedMsg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + expectedErr := ErrUnknownQBFTMessageType + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving an incorrect signature size (too small) + t.Run("wrong signature size", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Signature = []byte{0x1} + + _, err := validSignedMessage.Encode() + require.Error(t, err) + }) + + // Initialize signature tests + t.Run("zero signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := 
specqbft.Height(slot) + + // Get error when receiving a consensus message with a zero signature + t.Run("consensus message", func(t *testing.T) { + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + zeroSignature := [signatureSize]byte{} + validSignedMessage.Signature = zeroSignature[:] + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrZeroSignature) + }) + + // Get error when receiving a consensus message with a zero signature + t.Run("partial signature message", func(t *testing.T) { + partialSigMessage := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, height) + zeroSignature := [signatureSize]byte{} + partialSigMessage.Signature = zeroSignature[:] + + encoded, err := partialSigMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) + require.ErrorIs(t, err, ErrZeroSignature) + }) + }) + + // Get error when receiving a message with an empty list of signers + t.Run("no signers", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, 
height) + validSignedMessage.Signers = []spectypes.OperatorID{} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrNoSigners) + }) + + // Initialize no signer tests + t.Run("zero signer", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + inactiveSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + zeroSignerKS := spectestingutils.Testing7SharesSet() + zeroSignerShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(zeroSignerKS), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + }, + Liquidated: false, + }, + } + zeroSignerShare.Committee[0].OperatorID = 0 + zeroSignerShare.ValidatorPubKey = inactiveSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, zeroSignerShare)) + + // Get error when receiving a consensus message with a zero signer + t.Run("consensus message", func(t *testing.T) { + validSignedMessage := spectestingutils.TestingProposalMessage(zeroSignerKS.Shares[1], 1) + validSignedMessage.Signers = []spectypes.OperatorID{0} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, zeroSignerShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + // Get error when receiving a partial message with a zero signer + t.Run("partial signature message", func(t *testing.T) { + partialSignatureMessage := spectestingutils.PostConsensusAttestationMsg(zeroSignerKS.Shares[1], 1, specqbft.Height(slot)) + partialSignatureMessage.Signer = 0 + + encoded, err := partialSignatureMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, zeroSignerShare.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + require.NoError(t, ns.Shares().Delete(nil, zeroSignerShare.ValidatorPubKey)) + }) + + // Get error when receiving a message with duplicated signers + t.Run("non unique signer", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{1, 2, 2} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + 
require.ErrorIs(t, err, ErrDuplicatedSigner) + }) + + // Get error when receiving a message with non-sorted signers + t.Run("signers not sorted", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{3, 2, 1} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrSignersNotSorted) + }) + + // Get error when receiving message from non quorum size amount of signers + t.Run("wrong signers length", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{1, 2} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + + expectedErr := ErrWrongSignersLength + expectedErr.got = 2 
+ expectedErr.want = "between 3 and 4" + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving a non decided message with multiple signers + t.Run("non decided with multiple signers", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingMultiSignerProposalMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + + expectedErr := ErrNonDecidedWithMultipleSigners + expectedErr.got = 3 + require.ErrorIs(t, err, expectedErr) + }) + + // Send late message for all roles and receive late message error + t.Run("late message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + tests := map[spectypes.BeaconRole]time.Time{ + spectypes.BNRoleAttester: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAttester)), + spectypes.BNRoleAggregator: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAggregator)), + spectypes.BNRoleProposer: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), + 
spectypes.BNRoleSyncCommittee: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommittee)), + spectypes.BNRoleSyncCommitteeContribution: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommitteeContribution)), + } + + for role, receivedAt := range tests { + role, receivedAt := role, receivedAt + t.Run(role.String(), func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorContains(t, err, ErrLateMessage.Error()) + }) + } + }) + + // Send early message for all roles before the duty start and receive early message error + t.Run("early message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot - 1) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.ErrorIs(t, err, ErrEarlyMessage) + }) + + // Send message from non-leader acting as a leader should receive an error + t.Run("not a leader", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := 
spectestingutils.TestingProposalMessageWithHeight(ks.Shares[2], 2, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + expectedErr := ErrSignerNotLeader + expectedErr.got = spectypes.OperatorID(2) + expectedErr.want = spectypes.OperatorID(1) + require.ErrorIs(t, err, expectedErr) + }) + + // Send wrong size of data (8 bytes) for a prepare justification message should receive an error + t.Run("malformed prepare justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Message.PrepareJustification = [][]byte{{1}} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + + require.ErrorContains(t, err, ErrMalformedPrepareJustifications.Error()) + }) + + // Send prepare justification message without a proposal message should receive an error + t.Run("non-proposal with prepare justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + 
+ slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.TestingProposalMessageWithParams( + ks.Shares[1], spectypes.OperatorID(1), specqbft.FirstRound, specqbft.FirstHeight, spectestingutils.TestingQBFTRootData, + nil, + spectestingutils.MarshalJustifications([]*specqbft.SignedMessage{ + spectestingutils.TestingRoundChangeMessage(ks.Shares[1], spectypes.OperatorID(1)), + })) + msg.Message.MsgType = specqbft.PrepareMsgType + + encodedValidSignedMessage, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + + expectedErr := ErrUnexpectedPrepareJustifications + expectedErr.got = specqbft.PrepareMsgType + require.ErrorIs(t, err, expectedErr) + }) + + // Send round change justification message without a proposal message should receive an error + t.Run("non-proposal with round change justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.TestingProposalMessageWithParams( + ks.Shares[1], spectypes.OperatorID(1), specqbft.FirstRound, specqbft.FirstHeight, spectestingutils.TestingQBFTRootData, + spectestingutils.MarshalJustifications([]*specqbft.SignedMessage{ + spectestingutils.TestingPrepareMessage(ks.Shares[1], spectypes.OperatorID(1)), + }), + nil, + ) + msg.Message.MsgType = specqbft.PrepareMsgType + + encodedValidSignedMessage, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: 
encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + + expectedErr := ErrUnexpectedRoundChangeJustifications + expectedErr.got = specqbft.PrepareMsgType + require.ErrorIs(t, err, expectedErr) + }) + + // Send round change justification message with a malformed message (1 byte) should receive an error + t.Run("malformed round change justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Message.RoundChangeJustification = [][]byte{{1}} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + + require.ErrorContains(t, err, ErrMalformedRoundChangeJustifications.Error()) + }) + + // Send message root hash that doesnt match the expected root hash should receive an error + t.Run("wrong root hash", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.FullData = []byte{1} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: 
spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + + expectedErr := ErrInvalidHash + require.ErrorIs(t, err, expectedErr) + }) + + // Receive proposal from same operator twice with different messages (same round) should receive an error + t.Run("double proposal with different data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) + require.NoError(t, err) + + signed2 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) + signed2.FullData = []byte{1} + signed2.Message.Root, err = specqbft.HashDataRoot(signed2.FullData) + require.NoError(t, err) + + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt, nil) + expectedErr := ErrDuplicatedProposalWithDifferentData + require.ErrorIs(t, err, expectedErr) + }) + + // Receive prepare from same operator twice with different messages (same round) should receive an 
error + t.Run("double prepare", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) + require.NoError(t, err) + + signed2 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) + require.NoError(t, err) + + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt, nil) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "prepare, having pre-consensus: 0, proposal: 0, prepare: 1, commit: 0, decided: 0, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive commit from same operator twice with different messages (same round) should receive an error + t.Run("double commit", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + 
receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) + require.NoError(t, err) + + signed2 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt, nil) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 1, decided: 0, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive round change from same operator twice with different messages (same round) should receive an error + t.Run("double round change", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt, nil) + require.NoError(t, err) + + signed2 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, 
err = validator.validateSSVMessage(message2, receivedAt, nil) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 0, round change: 1, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive too many decided messages should receive an error + t.Run("too many decided", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + + signed := spectestingutils.TestingCommitMultiSignerMessageWithRound( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, 1) + encodedSigned, err := signed.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedSigned, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + for i := 0; i < maxDecidedCount(len(share.Committee)); i++ { + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + require.NoError(t, err) + } + + _, _, err = validator.validateSSVMessage(message, receivedAt, nil) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "decided, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 8, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive message from a round that is too high for that epoch should receive an error + t.Run("round too high", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + tests := map[spectypes.BeaconRole]specqbft.Round{ + spectypes.BNRoleAttester: 13, + spectypes.BNRoleAggregator: 13, + spectypes.BNRoleProposer: 7, + spectypes.BNRoleSyncCommittee: 7, + 
spectypes.BNRoleSyncCommitteeContribution: 7, + } + + for role, round := range tests { + role, round := role, round + t.Run(role.String(), func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + signedMessage := spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, round) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(0).Add(validator.waitAfterSlotStart(role)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) + require.ErrorContains(t, err, ErrRoundTooHigh.Error()) + }) + } + }) + + // Receive message from a round that is incorrect for current epoch should receive an error + t.Run("round already advanced", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signedMessage := spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 2) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) + require.NoError(t, err) + + signedMessage = spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 1) + encodedMessage, err = signedMessage.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) + require.ErrorContains(t, err, ErrRoundAlreadyAdvanced.Error()) + }) + + // Initialize tests for testing when 
sending a message with a slot before the current one + t.Run("slot already advanced", func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + // Send a consensus message with a slot before the current one should cause an error + t.Run("consensus message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + signedMessage := spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height+1) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester)), nil) + require.NoError(t, err) + + signedMessage = spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height) + encodedMessage, err = signedMessage.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + + // Send a partial signature message with a slot before the current one should cause an error + t.Run("partial signature message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + message := spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height+1) + message.Message.Slot = phase0.Slot(height) + 1 + sig, err := spectestingutils.NewTestingKeyManager().SignRoot(message.Message, spectypes.PartialSignatureType, ks.Shares[2].GetPublicKey().Serialize()) + require.NoError(t, err) + message.Signature = sig + + encodedMessage, err := 
message.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester)), nil) + require.NoError(t, err) + + message = spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height) + message.Message.Slot = phase0.Slot(height) + sig, err = spectestingutils.NewTestingKeyManager().SignRoot(message.Message, spectypes.PartialSignatureType, ks.Shares[2].GetPublicKey().Serialize()) + require.NoError(t, err) + message.Signature = sig + + encodedMessage, err = message.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)), nil) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + }) + + // Receive an event message from an operator that is not myself should receive an error + t.Run("event message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + eventMsg := &ssvtypes.EventMsg{} + encoded, err := eventMsg.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: ssvmessage.SSVEventMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt, nil) + require.ErrorIs(t, err, ErrEventMessage) + }) + + // Get error when receiving an SSV message with an invalid signature. 
+ t.Run("signature verification", func(t *testing.T) { + var afterFork = netCfg.PermissionlessActivationEpoch + 1000 + + t.Run("unsigned message before fork", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.NoError(t, err) + }) + + t.Run("unsigned message after fork", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[4], 4, specqbft.Height(afterFork)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(afterFork) + receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) + }) + + t.Run("signed message before fork", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, specqbft.Height(slot)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + hash := sha256.Sum256(encodedMsg) + privKey, err := rsa.GenerateKey(crand.Reader, 2048) + require.NoError(t, err) + + const operatorID = spectypes.OperatorID(1) + + pubKey, err := rsaencryption.ExtractPublicKey(privKey) + require.NoError(t, err) + + od := &registrystorage.OperatorData{ + ID: operatorID, + PublicKey: []byte(pubKey), + OwnerAddress: common.Address{}, + } + + found, err := ns.SaveOperatorData(nil, od) + require.NoError(t, err) + require.False(t, found) + + signature, err := rsa.SignPKCS1v15(crand.Reader, privKey, crypto.SHA256, hash[:]) + require.NoError(t, err) + + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, operatorID, signature) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) + + require.NoError(t, ns.DeleteOperatorData(nil, operatorID)) + 
}) + + t.Run("signed message after fork", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(afterFork) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, specqbft.Height(slot)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + hash := sha256.Sum256(encodedMsg) + privKey, err := rsa.GenerateKey(crand.Reader, 2048) + require.NoError(t, err) + + const operatorID = spectypes.OperatorID(1) + + pubKey, err := rsaencryption.ExtractPublicKey(privKey) + require.NoError(t, err) + + od := &registrystorage.OperatorData{ + ID: operatorID, + PublicKey: []byte(pubKey), + OwnerAddress: common.Address{}, + } + + found, err := ns.SaveOperatorData(nil, od) + require.NoError(t, err) + require.False(t, found) + + signature, err := rsa.SignPKCS1v15(crand.Reader, privKey, crypto.SHA256, hash[:]) + require.NoError(t, err) + + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, operatorID, signature) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.NoError(t, err) + + require.NoError(t, ns.DeleteOperatorData(nil, operatorID)) + }) + + t.Run("unexpected operator ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(afterFork) + + validSignedMessage := 
spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, specqbft.Height(slot)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + hash := sha256.Sum256(encodedMsg) + privKey, err := rsa.GenerateKey(crand.Reader, 2048) + require.NoError(t, err) + + const operatorID = spectypes.OperatorID(1) + + pubKey, err := rsaencryption.ExtractPublicKey(privKey) + require.NoError(t, err) + + od := &registrystorage.OperatorData{ + ID: operatorID, + PublicKey: []byte(pubKey), + OwnerAddress: common.Address{}, + } + + found, err := ns.SaveOperatorData(nil, od) + require.NoError(t, err) + require.False(t, found) + + signature, err := rsa.SignPKCS1v15(crand.Reader, privKey, crypto.SHA256, hash[:]) + require.NoError(t, err) + + const unexpectedOperatorID = 2 + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, unexpectedOperatorID, signature) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.ErrorContains(t, err, ErrOperatorNotFound.Error()) + + require.NoError(t, ns.DeleteOperatorData(nil, operatorID)) + }) + + t.Run("malformed signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithNodeStorage(ns)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(afterFork) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, specqbft.Height(slot)) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message 
:= &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + encodedMsg, err := commons.EncodeNetworkMsg(message) + require.NoError(t, err) + + privKey, err := rsa.GenerateKey(crand.Reader, 2048) + require.NoError(t, err) + + const operatorID = spectypes.OperatorID(1) + + pubKey, err := rsaencryption.ExtractPublicKey(privKey) + require.NoError(t, err) + + od := &registrystorage.OperatorData{ + ID: operatorID, + PublicKey: []byte(pubKey), + OwnerAddress: common.Address{}, + } + + found, err := ns.SaveOperatorData(nil, od) + require.NoError(t, err) + require.False(t, found) + + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, operatorID, bytes.Repeat([]byte{1}, 256)) + + topicID := commons.ValidatorTopicID(message.GetID().GetPubKey()) + pMsg := &pubsub.Message{ + Message: &pspb.Message{ + Topic: &topicID[0], + Data: encodedMsg, + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pMsg, receivedAt) + require.ErrorContains(t, err, ErrRSADecryption.Error()) + + require.NoError(t, ns.DeleteOperatorData(nil, operatorID)) + }) + }) +} diff --git a/migrations/migration_2_encrypt_shares.go b/migrations/migration_2_encrypt_shares.go index 4ca0eb62c7..03c40a301d 100644 --- a/migrations/migration_2_encrypt_shares.go +++ b/migrations/migration_2_encrypt_shares.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "crypto/x509" "fmt" + "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/storage/basedb" diff --git a/monitoring/README.md b/monitoring/README.md index 4e493bd81b..85150d46c6 100644 --- a/monitoring/README.md +++ b/monitoring/README.md @@ -6,64 +6,103 @@ # SSV - Monitoring -`/metrics` end-point is exposing metrics from ssv node to prometheus. +This page will outline how to monitor an SSV Node using Grafana and Prometheus. 
+### Pre-requisites +Make sure your node is exposing a `/metrics` and `/health` endpoints. This is done via node configuration, as explained in the [Installation guide on the docs](https://docs.ssv.network/run-a-node/operator-node/installation#create-configuration-file). -Prometheus should also hit `/health` end-point in order to collect the health check metrics. \ -Even if prometheus is not configured, the end-point can simply be polled by a simple HTTP client -(it doesn't contain metrics) +This guide will not go into the details of setting up and running Prometheus or Grafana. For this, we recommend visiting their related documentations: + +[Prometheus docs](https://prometheus.io/docs/introduction/overview/) + +[Grafana docs](https://grafana.com/docs/) + +For Grafana, specifically, [Grafana Cloud](https://grafana.com/docs/grafana-cloud/) is a viable solution, especially for beginners. See the configuration of a [local prometheus service](prometheus/prometheus.yaml). ### Health Check -Health check route is available on `GET /health`. \ -In case the node is healthy it returns an HTTP Code `200` with empty response: +Even if Prometheus is not configured, the `/health` end-point can simply be polled by a simple HTTP client as a health check. 
\ +In case the node is healthy it returns an HTTP Code `200` with an empty response: ```shell $ curl http://localhost:15000/health ``` -If the node is not healthy, the corresponding errors will be returned with HTTP Code `500`: +If the node is not healthy, the corresponding errors will be returned with an HTTP Code of `500`: ```shell $ curl http://localhost:15000/health {"errors": ["could not sync eth1 events"]} ``` -## Metrics +## Prometheus -`MetricsAPIPort` is used to enable prometheus metrics collection: +In a typical setup, where only one SSV node Docker container is running, Prometheus should be configured with a file like this: -Example: ```yaml -MetricsAPIPort: 15000 +global: + scrape_interval: 10s + evaluation_interval: 10s + +scrape_configs: + - job_name: ssv + metrics_path: /metrics + static_configs: + - targets: + # change the targets according to your setup + # if running prometheus from source, or as executable: + # - :15000 (i.e.: ssv_node:15000, check with docker ps command) + # if running prometheus as docker container: + - host.docker.internal:15000 + - job_name: ssv_health + metrics_path: /health + static_configs: + - targets: + # change the targets according to your setup + # if running prometheus from source, or as executable: + # - :15000 (i.e.: ssv_node:15000, check with docker ps command) + # if running prometheus as docker container: + - host.docker.internal:15000 + ``` -Or as env variable: -```shell -METRICS_API_PORT=15000 +And to launch the Prometheus service as a Docker container as well ([using the official Docker image, as shown here](https://hub.docker.com/r/prom/prometheus)), use this command, where `/path/to/prometheus.yml` is the path and filename of the configuration file described above: + +```bash +docker run \ + -p 9090:9090 \ + -v /path/to/prometheus.yml:/etc/prometheus/prometheus.yml \ + prom/prometheus ``` -## Grafana +> ⚠️ Note: If you are not running Prometheus as a Docker container, but as an executable, change the 
`targets` in the config file to reflect the correct networking connections. In the case where the SSV Node container is called `ssv_node` the targets should look like this: + +```yaml + - targets: + - ssv_node:15000 +``` + +> Use the `docker ps` command to verify the name of the SSV Node container. + +## Grafana monitoring + +After successfully configuring a Prometheus service, and [adding it as a data source to Grafana](https://grafana.com/docs/grafana/latest/datasources/prometheus/configure-prometheus-data-source/) (read [here for Grafana Cloud](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/data-sources/prometheus/configure-prometheus-data-source/)), a Grafana dashboard can be created. + +Below, an example of two dashboards, respectively monitoring the SSV Node and the performance of an Operator: -In order to setup a grafana dashboard do the following: -1. Enable metrics (`MetricsAPIPort`) -2. Setup Prometheus as mentioned in the beginning of this document and add as data source - * Job name assumed to be '`ssv`' -3. Import dashboards to Grafana: - * [SSV Node dashboard](./grafana/NODE.md) - * [Operator Performance dashboard](./grafana/PERF.md) -4. Align dashboard variables: - * `instance` - container name, used in 'instance' field for metrics coming from prometheus. \ - In the given dashboard, instances names are: `ssv-node-v2-`, make sure to change according to your setup +* [SSV Node monitoring](grafana/dashboard_ssv_node.json) +* [Operator performance monitoring](grafana/dashboard_ssv_operator_performance.json) -
+The dashboards leverage Grafana templating so that one can select different datasources, the Grafana SSV operators are inferred from the Prometheus metrics, so if you spin up more SSV operators, they will show up on the dashboard seamlessly. -### Profiling +--- +## Profiling -Profiling can be enabled via config: +Profiling can be enabled in the node configuration file (`config.yaml`): ```yaml EnableProfile: true ``` +> Note: remember to restart the node after changing its configuration All the default `pprof` routes are available via HTTP: ```shell diff --git a/monitoring/grafana/dashboard_msg_validation.json b/monitoring/grafana/dashboard_msg_validation.json new file mode 100644 index 0000000000..8ea0bd8f08 --- /dev/null +++ b/monitoring/grafana/dashboard_msg_validation.json @@ -0,0 +1,2175 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 144, + "iteration": 1695134055974, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 12, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Total", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Ignored", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Rejected", + "refId": "C" + } + ], + "title": "Message RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "#F2495C", + "mode": "palette-classic", + "seriesBy": "last" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 0, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 3, + "mappings": [], + 
"thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 3, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval]))\n/\nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Rejected", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval]))\n/\nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval]))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Ignored", + "refId": "B" + } + ], + "title": "Ignore/Reject Rate", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 20, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval])) by (role) / sum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (role)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Ignored by Role", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": 
[], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 22, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval])) by (role) / sum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (role)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Rejected by Role", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + 
"id": 23, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval])) by (round) \n/ \nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (round)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Ignored by Round", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 24, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval])) by (round) \n/ \nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (round)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Rejected by Round", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "duplicated proposal with different data", + "late message", + "message round is too far from estimated", + "no duty for this epoch", + "round is too high for this role", + "signer has already advanced to a later slot", + "too many messages of same type per round", + "unknown validator", + "validator is not attesting" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": 
"custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 4, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right", + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation{instance=~\"$instance.*\", reason!=\"\"}[$__interval])) by (reason)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{reason}}", + "refId": "A" + } + ], + "title": "Validation Failure Reason", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 5, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": 
"sum(increase(ssv_message_validation_ssv_type{instance=~\"$instance.*\", type!=\"\"}[$__interval])) by (type)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "Messages by SSV type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 6, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type!=\"\"}[$__interval])) by (type)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "Messages by QBFT type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + 
"barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 48 + }, + "id": 7, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type=\"commit\", signers=\"1\"}[$__interval])) by (signers)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "Commit", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type=\"commit\", signers!=\"1\"}[$__interval])) by (signers))", + "hide": false, + "interval": "", + "legendFormat": "Decided", + "refId": "B" + } + ], + "title": "Commit messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": 
false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 56 + }, + "id": 19, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_in_committee{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "in committee", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "rate(ssv_message_non_committee{instance=~\"$instance.*\", decided=\"decided\"}[$__interval])", + "hide": false, + "interval": "", + "legendFormat": "non-committee decided", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_non_committee{instance=~\"$instance.*\", decided=\"non-decided\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "non-committee non-decided", + "refId": "C" + } + ], + "title": "Committee belonging RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + 
"viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 64 + }, + "id": 9, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, 
rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message size (bytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "Over panel interval", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 72 + }, + "id": 13, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_sum{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Bytes", + "refId": "A" + } + ], + "title": "Total bytes received RPS (incoming messages)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 80 + }, + "id": 14, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation_duration_seconds_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_message_validation_duration_seconds_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message validation duration (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 88 + }, + "id": 15, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + 
"tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_signature_validation_duration_seconds_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_signature_validation_duration_seconds_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": 
"Signature validation duration (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 96 + }, + "id": 17, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_incoming{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Incoming, RPS", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_outgoing{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Outgoing, RPS", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_drops{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Dropped, RPS", + "refId": "C" + } + ], + "title": "Queue message RPS", + 
"type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 104 + }, + "id": 18, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "ssv_message_queue_size{instance=~\"$instance.*\"}", + "hide": false, + "interval": "", + "legendFormat": "Size", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "ssv_message_queue_capacity{instance=~\"$instance.*\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Capacity", + "refId": "G" + } + ], + "title": "Queue size/capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": 
false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 112 + }, + "id": 16, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_time_in_queue_seconds_sum{instance=~\"$instance.*\"}[$__interval])) by (instance)\n/\nsum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval])) by (instance)\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + 
"hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message time in queue (seconds)", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 34, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "ssv-node-v2-4", + "value": "ssv-node-v2-4" + }, + "hide": 1, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [ + { + "selected": false, + "text": "ssv-node-v2-1", + "value": "ssv-node-v2-1" + }, + { + "selected": false, + "text": "ssv-node-v2-2", + "value": "ssv-node-v2-2" + }, + { + "selected": false, + "text": "ssv-node-v2-3", + "value": "ssv-node-v2-3" + }, + { + "selected": true, + "text": "ssv-node-v2-4", + "value": "ssv-node-v2-4" + }, + { + "selected": false, + "text": "ssv-node-v2-5", + "value": "ssv-node-v2-5" + }, + { + "selected": false, + "text": "ssv-node-v2-6", + "value": "ssv-node-v2-6" + }, + { + "selected": false, + "text": "ssv-node-v2-7", + "value": "ssv-node-v2-7" + }, + { + "selected": false, + "text": "ssv-node-v2-8", + "value": "ssv-node-v2-8" + } + ], + "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": 
"now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Message Validation", + "uid": "DppaYPgSk", + "version": 42, + "weekStart": "" +} \ No newline at end of file diff --git a/monitoring/grafana/dashboard_ssv_node.json b/monitoring/grafana/dashboard_ssv_node.json index d5568f9de2..8cd88a8d37 100644 --- a/monitoring/grafana/dashboard_ssv_node.json +++ b/monitoring/grafana/dashboard_ssv_node.json @@ -1,9 +1,57 @@ { + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "gauge", + "name": "Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.2.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph (old)", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], "annotations": { "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -21,12 +69,16 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 115, - "iteration": 1676023992743, + "id": null, "links": [], "liveNow": false, "panels": [ { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "gridPos": { "h": 1, "w": 24, @@ -34,13 +86,22 @@ "y": 0 }, "id": 20, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "refId": "A" + } + ], "title": "Node Health", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Current status of the beacon, ETH1 and SSV operator node", "fieldConfig": { @@ -149,6 
+210,8 @@ }, "id": 42, "options": { + "minVizHeight": 75, + "minVizWidth": 75, "orientation": "auto", "reduceOptions": { "calcs": [ @@ -161,13 +224,14 @@ "showThresholdMarkers": true, "text": {} }, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, + "editorMode": "code", "exemplar": false, "expr": "ssv_beacon_status{instance=~\"$instance.*\"}", "instant": true, @@ -178,7 +242,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv_eth1_status{instance=~\"$instance.*\"}", @@ -191,7 +255,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "(ssv_node_status{instance=~\"$instance.*\"} + 1) or (absent(ssv_node_status{instance=~\"$instance.*\"}) * 0)", @@ -208,7 +272,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Health status of the beacon node", "fieldConfig": { @@ -217,6 +281,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -228,6 +295,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -298,11 +366,13 @@ "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "bottom" + "displayMode": "list", + "placement": "bottom", + "showLegend": false }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.0.4", @@ -310,7 +380,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_beacon_status{instance=~\"$instance.*\"}", @@ -325,7 +395,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": 
"${datasource}" }, "description": "Health status of the ETH1 node", "fieldConfig": { @@ -334,6 +404,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -345,6 +418,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -415,11 +489,13 @@ "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "bottom" + "displayMode": "list", + "placement": "bottom", + "showLegend": false }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.0.4", @@ -427,7 +503,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_eth1_status{instance=~\"$instance.*\"}", @@ -442,7 +518,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Health status of the SSV operator node", "fieldConfig": { @@ -451,6 +527,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -462,6 +541,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -525,11 +605,13 @@ "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "bottom" + "displayMode": "list", + "placement": "bottom", + "showLegend": false }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.0.4", @@ -537,7 +619,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_node_status{instance=~\"$instance.*\"} + 1", @@ -550,7 +632,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": 
"${datasource}" }, "exemplar": true, "expr": "absent(ssv_node_status{instance=~\"$instance.*\"}) * 0", @@ -565,6 +647,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, "gridPos": { "h": 1, "w": 24, @@ -573,6 +659,15 @@ }, "id": 18, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], "title": "Resource Usage", "type": "row" }, @@ -583,7 +678,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "decimals": 2, "description": "RAM memory usage of the SSV operator node", @@ -622,7 +717,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "pointradius": 5, "points": false, "renderer": "flot", @@ -634,7 +729,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (container_memory_working_set_bytes{image!=\"\", pod=~\"$instance.*\"}) by (pod)", @@ -685,7 +780,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Go memory usage of the SSV operator node", "fieldConfig": { @@ -694,6 +789,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -705,6 +803,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -751,14 +850,20 @@ "lastNotNull" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_memstats_sys_bytes{instance=~\"$instance.*\"}", "interval": "", @@ -766,6 +871,10 @@ "refId": 
"A" }, { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_memstats_heap_idle_bytes{instance=~\"$instance.*\"}", "hide": false, @@ -774,6 +883,10 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_memstats_heap_inuse_bytes{instance=~\"$instance.*\"}", "hide": false, @@ -782,6 +895,10 @@ "refId": "C" }, { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance.*\"}", "hide": false, @@ -800,7 +917,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Number of go routines running on the SSV operator node", "fieldConfig": { @@ -847,7 +964,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "pointradius": 2, "points": false, "renderer": "flot", @@ -857,6 +974,10 @@ "steppedLine": false, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "go_goroutines{instance=~\"$instance.*\"}", "format": "table", @@ -905,7 +1026,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "decimals": 3, "description": "CPU usage of the SSV operator node", @@ -951,7 +1072,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "pointradius": 5, "points": false, "renderer": "flot", @@ -961,6 +1082,10 @@ "steppedLine": true, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "sum (rate (container_cpu_usage_seconds_total{image!=\"\", pod=~\"$instance.*\"}[1m])) by (pod)", "format": "time_series", @@ -1010,7 +1135,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Hard drive 
memory usage of the SSV operator node", "fieldConfig": { @@ -1019,6 +1144,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -1030,6 +1158,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1076,14 +1205,20 @@ "last" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "((kubelet_volume_stats_used_bytes{persistentvolumeclaim=\"$instance\"} / kubelet_volume_stats_capacity_bytes{persistentvolumeclaim=\"$instance\"}) * 100)", "hide": true, @@ -1092,6 +1227,10 @@ "refId": "A" }, { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "exemplar": true, "expr": "kubelet_volume_stats_used_bytes{persistentvolumeclaim=\"$instance\"}", "format": "time_series", @@ -1110,7 +1249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Network input and output usage of the SSV operator node", "fieldConfig": { @@ -1119,6 +1258,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -1130,6 +1272,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 2, "pointSize": 5, @@ -1180,10 +1323,12 @@ "max" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -1191,7 +1336,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + 
"uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate (container_network_receive_bytes_total{pod=~\"$instance.*\"}[1m])) by (pod)", @@ -1207,7 +1352,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate (container_network_transmit_bytes_total{pod=~\"$instance.*\"}[1m])) by (pod)", @@ -1226,6 +1371,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, "gridPos": { "h": 1, "w": 24, @@ -1234,13 +1383,22 @@ }, "id": 12, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], "title": "Network Discovery", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Number of connected peers to the SSV operator node", "fieldConfig": { @@ -1292,12 +1450,12 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_p2p_all_connected_peers{instance=~\"$instance.*\"}", @@ -1312,7 +1470,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Subnet peer distribution based on topic", "fieldConfig": { @@ -1321,6 +1479,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -1332,6 +1493,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1379,17 +1541,19 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", - 
"uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "ssv_p2p_connected_peers{instance=~\"$instance.*\"}", @@ -1404,7 +1568,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Subnet peers breakdown table of the SSV operator node", "fieldConfig": { @@ -1414,7 +1578,9 @@ }, "custom": { "align": "center", - "displayMode": "color-text", + "cellOptions": { + "type": "color-text" + }, "width": 150 }, "mappings": [], @@ -1424,8 +1590,7 @@ "mode": "absolute", "steps": [ { - "color": "#ccccdc", - "value": null + "color": "#ccccdc" } ] } @@ -1438,8 +1603,10 @@ }, "properties": [ { - "id": "custom.displayMode", - "value": "auto" + "id": "custom.cellOptions", + "value": { + "type": "auto" + } }, { "id": "unit", @@ -1463,8 +1630,7 @@ "mode": "absolute", "steps": [ { - "color": "#ccccdc", - "value": null + "color": "#ccccdc" }, { "color": "#ccccdc", @@ -1518,7 +1684,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:network:subnets:known{instance=~\"$instance.*\"}", @@ -1531,7 +1697,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:network:subnets:connected{instance=~\"$instance.*\"}", @@ -1545,7 +1711,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:network:subnets:my{instance=~\"$instance.*\"}", @@ -1617,7 +1783,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "The discovery rate of the subnet peers.\nFound is rate per second of nodes that were found with discovery. 
Rejected is rate per second of nodes that were found with discovery but rejected because of limit or subnet", "fieldConfig": { @@ -1658,8 +1824,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1681,7 +1846,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1691,7 +1857,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:network:discovery:found{instance=~\"$instance.*\"}[5m])", @@ -1702,7 +1868,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:network:discovery:rejected{instance=~\"$instance.*\"}[5m])", @@ -1718,7 +1884,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Table breakdown of all the SSV operator nodes", "fieldConfig": { @@ -1728,7 +1894,9 @@ }, "custom": { "align": "center", - "displayMode": "auto", + "cellOptions": { + "type": "auto" + }, "filterable": true }, "mappings": [], @@ -1736,8 +1904,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1898,7 +2065,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:network:peers_identity{instance=~\"$instance.*\"}[5m])", @@ -2035,6 +2202,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, "gridPos": { "h": 1, "w": 24, @@ -2043,13 +2214,22 @@ }, "id": 2, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "refId": "A" + } + ], "title": "Network Messaging", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of 
inbound / outbound topic messages over the last 5 minutes", "fieldConfig": { @@ -2091,8 +2271,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "#EAB839", @@ -2114,7 +2293,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2125,7 +2305,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\"}[5m]))", @@ -2138,7 +2318,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\", msg_type=\"0\"}[5m]))", @@ -2152,7 +2332,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\", msg_type=\"1\"}[5m]))", @@ -2166,7 +2346,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\", msg_type=\"2\"}[5m]))", @@ -2180,7 +2360,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum (rate(ssv:p2p:pubsub:msg:out{instance=~\"$instance.*\"}[5m]))", @@ -2198,7 +2378,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of incoming topic messages for every topic over the last 5 minutes", "fieldConfig": { @@ -2240,8 +2420,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2268,7 +2447,8 @@ "mean" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2278,7 +2458,7 @@ 
{ "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate (ssv:p2p:pubsub:msg:in{instance=~\"$instance.*\"}[5m])", @@ -2293,7 +2473,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of outgoing topic messages for every topic over the last 5 minutes.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive are outgoing requests.", "fieldConfig": { @@ -2334,8 +2514,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2362,7 +2541,8 @@ "mean" ], "displayMode": "table", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2372,7 +2552,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate (ssv:p2p:pubsub:msg:out{instance=~\"$instance.*\"}[5m])", @@ -2387,7 +2567,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Table breakdown for each stream protocol.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive Requests are outgoing.", "fieldConfig": { @@ -2397,15 +2577,16 @@ }, "custom": { "align": "auto", - "displayMode": "auto" + "cellOptions": { + "type": "auto" + } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2460,7 +2641,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:p2p:streams:res{instance=~\"$instance.*\"}", @@ -2473,7 +2654,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:p2p:streams:req:count{instance=~\"$instance.*\"}", @@ -2487,7 +2668,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:p2p:streams:req:success{instance=~\"$instance.*\"}", @@ -2501,7 +2682,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "ssv:p2p:streams:req:active{instance=~\"$instance.*\"}", @@ -2558,7 +2739,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of requests, responses and active requests.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive are outgoing requests.", "fieldConfig": { @@ -2599,8 +2780,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2618,7 +2798,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2629,7 +2810,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:p2p:streams:res{instance=~\"$instance.*\"}[5m])", @@ -2642,7 +2823,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:p2p:streams:req:count{instance=~\"$instance.*\"}[5m])", @@ -2656,7 +2837,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:p2p:streams:req:success{instance=~\"$instance.*\"}[5m])", @@ -2670,7 +2851,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "rate(ssv:p2p:streams:req:active{instance=~\"$instance.*\"}[5m])", @@ -2685,131 +2866,297 @@ "title": "Stream Protocols (time-series)", "transformations": [], "type": "timeseries" - } - ], - "schemaVersion": 34, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "current": { - 
"selected": false, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" - }, - "description": "", - "hide": 1, - "includeAll": false, - "multi": false, - "name": "instance", - "options": [ - { - "selected": false, - "text": "ssv-node-v2-1", - "value": "ssv-node-v2-1" - }, - { - "selected": false, - "text": "ssv-node-v2-2", - "value": "ssv-node-v2-2" - }, - { - "selected": false, - "text": "ssv-node-v2-3", - "value": "ssv-node-v2-3" - }, - { - "selected": false, - "text": "ssv-node-v2-4", - "value": "ssv-node-v2-4" - }, - { - "selected": false, - "text": "ssv-node-v2-5", - "value": "ssv-node-v2-5" - }, - { - "selected": false, - "text": "ssv-node-v2-6", - "value": "ssv-node-v2-6" - }, - { - "selected": false, - "text": "ssv-node-v2-7", - "value": "ssv-node-v2-7" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 81 + }, + "id": 67, + "panels": [], + "title": "Vitals", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "Rate per second of requests, responses and active requests.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive are outgoing requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" }, - { - "selected": false, - "text": "ssv-node-v2-8", - "value": "ssv-node-v2-8" + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#73BF69", + "value": null + } + ] + } + }, + 
"overrides": [ { - "selected": false, - "text": "ssv-node-9", - "value": "ssv-node-9" + "matcher": { + "id": "byName", + "options": "10ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] }, { - "selected": false, - "text": "ssv-node-10", - "value": "ssv-node-10" + "matcher": { + "id": "byName", + "options": "20ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-orange", + "mode": "fixed" + } + } + ] }, { - "selected": false, - "text": "ssv-node-11", - "value": "ssv-node-11" + "matcher": { + "id": "byName", + "options": "100ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] }, { - "selected": false, - "text": "ssv-node-12", - "value": "ssv-node-12" + "matcher": { + "id": "byName", + "options": "5000ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 82 + }, + "id": 65, + "maxDataPoints": 25, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.3.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" }, - { - "selected": false, - "text": "ssv-exporter", - "value": "ssv-exporter" + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5ms", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" }, - { - "selected": false, - "text": "ssv-exporter-v2", - "value": "ssv-exporter-v2" + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"10.0\"}[5m])) 
-\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "10ms", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" }, - { - "selected": true, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"20.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"10.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "20ms", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" }, - { - "selected": false, - "text": "ssv-node-v3-2", - "value": "ssv-node-v3-2" + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"100.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"20.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "100ms", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" }, - { - "selected": false, - "text": "ssv-node-v3-3", - "value": "ssv-node-v3-3" + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"500.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"100.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "500ms", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" }, - { - "selected": false, - "text": "ssv-node-v3-4", - "value": "ssv-node-v3-4" - } - ], - "query": 
"ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-node-9,ssv-node-10,ssv-node-11,ssv-node-12,ssv-exporter,ssv-exporter-v2,ssv-node-v3-1,ssv-node-v3-2,ssv-node-v3-3,ssv-node-v3-4", + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5000.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"500.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5000ms", + "refId": "F" + } + ], + "title": "Duty Execution Latency (5m)", + "transformations": [], + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 38, + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(ssv_beacon_status,instance)", + "description": "", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(ssv_beacon_status,instance)", + "refId": "instance" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", "queryValue": "", + "refresh": 1, + "regex": "", "skipUrlSync": false, - "type": "custom" + "type": "datasource" } ] }, "time": { - "from": "now-6h", + "from": "now-1h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Node Dashboard", - "uid": "QNiMrdoVz", - "version": 59, + "uid": "node_dashboard", + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/monitoring/grafana/dashboard_ssv_operator_performance.json b/monitoring/grafana/dashboard_ssv_operator_performance.json index 1ba7c2714f..d7248108e7 100644 --- 
a/monitoring/grafana/dashboard_ssv_operator_performance.json +++ b/monitoring/grafana/dashboard_ssv_operator_performance.json @@ -1,9 +1,39 @@ { + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "bargauge", + "name": "Bar gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.2.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], "annotations": { "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -21,13 +51,16 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 117, - "iteration": 1676024010436, + "id": null, "links": [], "liveNow": false, "panels": [ { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "gridPos": { "h": 1, "w": 24, @@ -36,13 +69,22 @@ }, "id": 8, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "refId": "A" + } + ], "title": "Operator Stats", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Total amount of validators and total active validators\n", "fieldConfig": { @@ -51,6 +93,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -62,6 +107,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -107,17 +153,19 @@ "last" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, 
"targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "count(ssv:validator:v2:status{instance=~\"$instance.*\"} == 3)", @@ -128,7 +176,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "count(ssv:validator:v2:status{instance=~\"$instance.*\"} != 9)", @@ -140,7 +188,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "count(ssv:validator:v2:status{instance=~\"$instance.*\"} == 6)", @@ -152,7 +200,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "count(ssv:validator:v2:status{instance=~\"$instance.*\"} == 9)", @@ -167,6 +215,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, "gridPos": { "h": 1, "w": 24, @@ -175,13 +227,22 @@ }, "id": 6, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, + "refId": "A" + } + ], "title": "Attester Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -211,6 +272,9 @@ "id": 48, "options": { "displayMode": "gradient", + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ @@ -219,17 +283,18 @@ "fields": "", "values": false }, - "showUnfilled": true + "showUnfilled": true, + "valueMode": "color" }, - "pluginVersion": "8.3.4", + "pluginVersion": "10.2.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"})", + "expr": 
"count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -239,7 +304,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 1)", @@ -252,7 +317,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 2)", @@ -265,10 +330,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -282,7 +347,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -291,6 +356,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -302,6 +370,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -347,10 +416,12 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -358,7 +429,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", 
role=\"ATTESTER\"}[5m]))", @@ -373,7 +444,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -382,6 +453,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -393,6 +467,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -434,10 +509,12 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -445,7 +522,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"ATTESTER\"}[5m]))", @@ -460,7 +537,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Attestation full flow duration excluding waiting for 1/3 of slot time and attestation data request. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -469,6 +546,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -480,6 +560,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -526,10 +607,12 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -537,7 +620,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))", @@ -552,7 +635,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -564,7 +647,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -576,7 +659,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -588,7 +671,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -600,7 +683,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -612,7 +695,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -624,7 +707,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -640,7 +723,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Proposal stage duration for an attester role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -649,6 +732,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -660,6 +746,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -706,10 +793,12 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -717,7 +806,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) 
sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))", @@ -732,7 +821,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -744,7 +833,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -756,7 +845,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -768,7 +857,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -780,7 +869,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -792,7 +881,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"1.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -804,7 +893,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"2.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"1.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -816,7 +905,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"2.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -828,7 +917,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"proposal\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"proposal\",instance=~\"$instance.*\"}[5m]))\n", @@ -844,7 +933,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Prepare stage duration for an attester role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -853,6 +942,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -864,6 +956,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -910,10 +1003,12 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -921,7 +1016,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))", @@ -936,7 +1031,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -948,7 +1043,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -960,7 +1055,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -972,7 +1067,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -984,7 +1079,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -996,7 +1091,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"1.5\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -1008,7 +1103,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"2.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"1.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -1020,7 +1115,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"2.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -1032,7 +1127,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"prepare\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"prepare\",instance=~\"$instance.*\"}[5m]))\n", @@ -1048,7 +1143,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Commit stage duration for an attester role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1057,6 +1152,9 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -1068,6 +1166,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1114,10 +1213,12 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "pluginVersion": "8.3.4", @@ -1125,7 +1226,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))", @@ -1140,7 +1241,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1152,7 +1253,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1164,7 +1265,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1176,7 +1277,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1188,7 +1289,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1200,7 +1301,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"1.5\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1212,7 +1313,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"2.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"1.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1224,7 +1325,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"2.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1236,7 +1337,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_instance_stage_duration_seconds_bucket{stage=\"commit\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_instance_stage_duration_seconds_count{stage=\"commit\",instance=~\"$instance.*\"}[5m]))\n", @@ -1252,7 +1353,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for an attester role (duration from 
proposal to commits) broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1295,8 +1396,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1318,7 +1418,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1329,7 +1430,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))", @@ -1344,7 +1445,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1356,7 +1457,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1368,7 +1469,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1380,7 +1481,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1392,7 +1493,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1404,7 +1505,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1416,7 +1517,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" 
}, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1432,7 +1533,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for an attester role (signature collection duration) broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1475,8 +1576,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1498,7 +1598,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1509,7 +1610,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))", @@ -1524,7 +1625,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1536,7 +1637,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, 
"exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1548,7 +1649,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1560,7 +1661,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1572,7 +1673,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1584,7 +1685,7 @@ 
{ "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1596,7 +1697,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1612,7 +1713,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Attestation data request duration for an attester role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1655,8 +1756,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1678,7 +1778,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1689,7 +1790,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))", @@ -1704,7 +1805,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1716,7 +1817,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1728,7 +1829,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", 
le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1740,7 +1841,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1752,7 +1853,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1764,7 +1865,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1776,7 +1877,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"ATTESTER\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"ATTESTER\", instance=~\"$instance.*\"}[5m]))\n", @@ -1792,7 +1893,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Attestation submission duration for an attester role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -1835,8 +1936,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1858,7 +1958,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -1869,7 +1970,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))", @@ -1884,7 +1985,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1896,7 +1997,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1908,7 +2009,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1920,7 +2021,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1932,7 +2033,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1944,7
+2045,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1956,7 +2057,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"ATTESTER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"ATTESTER\",instance=~\"$instance.*\"}[5m]))\n", @@ -1971,6 +2072,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "gridPos": { "h": 1, "w": 24, @@ -1979,13 +2084,22 @@ }, "id": 4, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "refId": "A" + } + ], "title": "Proposer Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -1998,8 +2112,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2030,10 +2143,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"})", + "expr":
"count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -2043,7 +2156,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 1)", @@ -2056,7 +2169,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 2)", @@ -2069,10 +2182,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -2086,7 +2199,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -2128,8 +2241,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2151,7 +2263,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2162,7 +2275,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"PROPOSER\"}[5m]))", @@ -2177,7 +2290,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -2219,8 +2332,7 @@ "mode": "absolute", 
"steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2238,7 +2350,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2249,7 +2362,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"PROPOSER\"}[5m]))", @@ -2264,7 +2377,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Duty full flow duration for a proposer role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -2307,8 +2420,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2330,7 +2442,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2341,7 +2454,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -2356,7 +2469,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2368,7 +2481,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2380,7 +2493,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2392,7 +2505,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2404,7 +2517,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2416,7 +2529,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2428,7 +2541,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2444,7 +2557,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Beacon block request duration for a proposer role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -2487,8 +2600,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2510,7 +2622,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2521,7 +2634,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))", @@ -2536,7 +2649,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2548,7 +2661,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2560,7 +2673,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", 
le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2572,7 +2685,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2584,7 +2697,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2596,7 +2709,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2608,7 +2721,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"PROPOSER\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"PROPOSER\", instance=~\"$instance.*\"}[5m]))\n", @@ -2624,7 +2737,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Block submission duration for a proposer role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -2667,8 +2780,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2690,7 +2802,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2701,7 +2814,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -2716,7 +2829,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2728,7 +2841,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2740,7 +2853,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2752,7 +2865,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2764,7 +2877,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(sssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2776,7 
+2889,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2788,7 +2901,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2804,7 +2917,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Pre-Consensus duration for a proposer role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -2847,8 +2960,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -2870,7 +2982,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -2881,7 +2994,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -2896,7 +3009,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2908,7 +3021,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2920,7 +3033,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2932,7 +3045,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2944,7 +3057,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2956,7 +3069,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2968,7 +3081,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -2984,7 +3097,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for a proposer role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -3027,8 +3140,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -3050,7 +3162,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3061,7 +3174,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -3076,7 +3189,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3088,7 +3201,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" 
}, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3100,7 +3213,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3112,7 +3225,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3124,7 +3237,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3136,7 +3249,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3148,7 +3261,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3164,7 +3277,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for a proposer role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -3207,8 +3320,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -3230,7 +3342,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3241,7 +3354,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))", @@ -3256,7 +3369,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3268,7 +3381,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3280,7 +3393,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3292,7 +3405,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3304,7 +3417,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3316,7 +3429,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3328,7 +3441,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"PROPOSER\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"PROPOSER\",instance=~\"$instance.*\"}[5m]))\n", @@ -3343,6 +3456,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, "gridPos": { "h": 1, "w": 24, @@ -3351,13 +3468,22 @@ }, "id": 2, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, + "refId": "A" + } + ], "title": "Aggregator Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -3370,8 +3496,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -3402,10 +3527,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -3415,7 +3540,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 1)", @@ -3428,7 +3553,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 2)", 
@@ -3441,10 +3566,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -3458,7 +3583,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -3500,8 +3625,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3523,7 +3647,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3534,7 +3659,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"AGGREGATOR\"}[5m]))", @@ -3549,7 +3674,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -3591,8 +3716,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -3610,7 +3734,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3621,7 +3746,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"AGGREGATOR\"}[5m]))", @@ -3636,7 +3761,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" 
}, "description": "Duty full flow duration for an aggregator role excluding waiting for 2/3 of slot time, attestation data and aggregate attestation requests. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -3679,8 +3804,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -3702,7 +3826,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3713,7 +3838,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -3728,7 +3853,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3740,7 +3865,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3752,7 +3877,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3764,7 +3889,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3776,7 +3901,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3788,7 +3913,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3800,7 +3925,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -3816,7 +3941,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Aggregate attestation request duration for an aggregator role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -3859,8 +3984,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -3882,7 +4006,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -3893,7 +4018,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))", @@ -3908,7 +4033,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3920,7 +4045,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3932,7 +4057,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3944,7 +4069,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3956,7 +4081,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3968,7 +4093,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3980,7 +4105,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"AGGREGATOR\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"AGGREGATOR\", instance=~\"$instance.*\"}[5m]))\n", @@ -3996,7 +4121,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Proof submission duration for an aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -4039,8 +4164,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4062,7 +4186,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4073,7 +4198,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -4088,7 +4213,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4100,7 +4225,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4112,7 +4237,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4124,7 +4249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4136,7 +4261,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(sssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4148,7 +4273,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4160,7 +4285,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4176,7 +4301,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Pre-Consensus duration for an aggregator role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -4219,8 +4344,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4242,7 +4366,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4253,7 +4378,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -4268,7 +4393,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4280,7 +4405,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4292,7 +4417,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4304,7 +4429,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4316,7 +4441,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4328,7 +4453,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4340,7 +4465,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4356,7 +4481,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for an aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -4399,8 +4524,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4422,7 +4546,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4433,7 +4558,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -4448,7 +4573,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4460,7 +4585,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4472,7 +4597,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4484,7 +4609,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4496,7 +4621,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4508,7 +4633,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4520,7 +4645,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + 
"uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4536,7 +4661,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for an aggregator role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -4579,8 +4704,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4602,7 +4726,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4613,7 +4738,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))", @@ -4628,7 +4753,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4640,7 +4765,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" 
}, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4652,7 +4777,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4664,7 +4789,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4676,7 +4801,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4688,7 +4813,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4700,7 +4825,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"AGGREGATOR\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"AGGREGATOR\",instance=~\"$instance.*\"}[5m]))\n", @@ -4715,6 +4840,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, "gridPos": { "h": 1, "w": 24, @@ -4723,13 +4852,22 @@ }, "id": 67, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "df1ac3ab-deb1-483c-9e1f-1e623c640584" + }, + "refId": "A" + } + ], "title": "Sync Committee Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -4742,8 +4880,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -4774,10 +4911,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": 
false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -4787,7 +4924,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 1)", @@ -4800,7 +4937,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 2)", @@ -4813,10 +4950,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -4830,7 +4967,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -4872,8 +5009,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -4895,7 +5031,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4906,7 +5043,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"SYNC_COMMITTEE\"}[5m]))", @@ -4921,7 +5058,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, 
"description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -4963,8 +5100,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -4982,7 +5118,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -4993,7 +5130,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"SYNC_COMMITTEE\"}[5m]))", @@ -5008,7 +5145,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Duty full flow duration for a sync committee role excluding waiting for 1/3 of slot time and beacon block root request.. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5051,8 +5188,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5074,7 +5210,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5085,7 +5222,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))", @@ -5100,7 +5237,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5112,7 +5249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5124,7 +5261,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5136,7 +5273,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5148,7 +5285,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" 
}, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5160,7 +5297,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5172,7 +5309,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5188,7 +5325,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Beacon block root request duration for a sync committee role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5231,8 +5368,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5254,7 +5390,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5265,7 +5402,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))", @@ -5280,7 +5417,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5292,7 +5429,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5304,7 +5441,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5316,7 +5453,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5328,7 +5465,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5340,7 +5477,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5352,7 +5489,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE\", instance=~\"$instance.*\"}[5m]))\n", @@ -5368,7 +5505,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Sync Message submission duration for a sync committee role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5411,8 +5548,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5434,7 +5570,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5445,7 +5582,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))", @@ -5460,7 +5597,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) 
sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5472,7 +5609,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5484,7 +5621,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5496,7 +5633,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5508,7 +5645,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(sssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5520,7 +5657,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5532,7 +5669,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5548,7 +5685,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for a sync committee role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5591,8 +5728,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5614,7 +5750,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5625,7 +5762,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))", @@ -5640,7 +5777,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5652,7 +5789,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5664,7 +5801,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5676,7 +5813,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5688,7 +5825,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5700,7 +5837,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5712,7 +5849,7 @@ { 
"datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5728,7 +5865,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for a sync committee role. Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -5771,8 +5908,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5794,7 +5930,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -5805,7 +5942,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))", @@ -5820,7 +5957,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5832,7 
+5969,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5844,7 +5981,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5856,7 +5993,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5868,7 +6005,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5880,7 +6017,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5892,7 +6029,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE\",instance=~\"$instance.*\"}[5m]))\n", @@ -5907,6 +6044,10 @@ }, { "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, "gridPos": { "h": 1, "w": 24, @@ -5915,13 +6056,22 @@ }, "id": 69, "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "refId": "A" + } + ], "title": "Sync Committee Aggregator Role", "type": "row" }, { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Amount of validators in a certain round number", "fieldConfig": { @@ -5934,8 +6084,7 @@ "mode": "absolute", "steps": [ { - 
"color": "green", - "value": null + "color": "green" } ] } @@ -5966,10 +6115,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -5979,7 +6128,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 1)", @@ -5992,7 +6141,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 2)", @@ -6005,10 +6154,10 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -6022,7 +6171,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of failed submissions within last 5 minutes", "fieldConfig": { @@ -6064,8 +6213,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6087,7 +6235,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6098,7 +6247,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + 
"uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_failed{instance=~\"$instance.*\", role=\"SYNC_COMMITTEE_CONTRIBUTION\"}[5m]))", @@ -6113,7 +6262,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Rate per second of successfully submitted roles within last 5 minutes", "fieldConfig": { @@ -6155,8 +6304,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -6174,7 +6322,8 @@ "legend": { "calcs": [], "displayMode": "list", - "placement": "bottom" + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6185,7 +6334,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_roles_submitted{instance=~\"$instance.*\", role=\"SYNC_COMMITTEE_CONTRIBUTION\"}[5m]))", @@ -6200,7 +6349,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Duty full flow duration for a sync committee aggregator role excluding waiting for slot time, beacon block root and sync committee contribution requests. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6243,8 +6392,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6266,7 +6414,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6277,7 +6426,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -6292,7 +6441,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6304,7 +6453,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6316,7 +6465,7 @@ { "datasource": { 
"type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6328,7 +6477,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6340,7 +6489,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6352,7 +6501,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6364,7 +6513,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_duty_full_flow_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_duty_full_flow_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6380,7 +6529,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Sync committee contribution request duration for a sync committee aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6423,8 +6572,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6446,7 +6594,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6457,7 +6606,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))", @@ -6472,7 +6621,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6484,7 +6633,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6496,7 +6645,7 @@ { "datasource": { "type": "prometheus", - "uid": 
"eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6508,7 +6657,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6520,7 +6669,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6532,7 +6681,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"5.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6544,7 +6693,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_beacon_data_request_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\", le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_beacon_data_request_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\", instance=~\"$instance.*\"}[5m]))\n", @@ -6560,7 +6709,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Signed contribution and proof submission duration for a sync committee aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6603,8 +6752,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6626,7 +6774,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6637,7 +6786,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -6652,7 +6801,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6664,7 +6813,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6676,7 +6825,7 @@ 
{ "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6688,7 +6837,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6700,7 +6849,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(sssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6712,7 +6861,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": 
"(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6724,7 +6873,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_beacon_submission_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_beacon_submission_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6740,7 +6889,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Pre-Consensus duration for a sync committee aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6783,8 +6932,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6806,7 +6954,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6817,7 +6966,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -6832,7 +6981,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6844,7 +6993,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6856,7 +7005,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6868,7 +7017,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6880,7 +7029,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6892,7 +7041,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6904,7 +7053,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_pre_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_pre_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -6920,7 +7069,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Consensus duration for a sync committee aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -6963,8 +7112,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -6986,7 +7134,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -6997,7 +7146,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -7012,7 +7161,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7024,7 +7173,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7036,7 +7185,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + 
"uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7048,7 +7197,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7060,7 +7209,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7072,7 +7221,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7084,7 +7233,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7100,7 +7249,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "description": "Post-Consensus duration for a sync committee aggregator role. 
Broken down by seconds used then shown as a distributed percentage.", "fieldConfig": { @@ -7143,8 +7292,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -7166,7 +7314,8 @@ "mean" ], "displayMode": "table", - "placement": "right" + "placement": "right", + "showLegend": true }, "tooltip": { "mode": "single" @@ -7177,7 +7326,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m])) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))", @@ -7192,7 +7341,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.02\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7204,7 +7353,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.05\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7216,7 +7365,7 @@ { "datasource": { 
"type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.1\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7228,7 +7377,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.2\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7240,7 +7389,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"0.5\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7252,7 +7401,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m])) - 
sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"1.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7264,7 +7413,7 @@ { "datasource": { "type": "prometheus", - "uid": "eXfXfqH7z" + "uid": "${datasource}" }, "exemplar": true, "expr": "(sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"+Inf\",instance=~\"$instance.*\"}[5m])) - sum(rate(ssv_validator_post_consensus_duration_seconds_bucket{role=\"SYNC_COMMITTEE_CONTRIBUTION\",le=\"5.0\",instance=~\"$instance.*\"}[5m]))) / ignoring (le) sum(rate(ssv_validator_post_consensus_duration_seconds_count{role=\"SYNC_COMMITTEE_CONTRIBUTION\",instance=~\"$instance.*\"}[5m]))\n", @@ -7278,118 +7427,48 @@ "type": "timeseries" } ], - "refresh": false, - "schemaVersion": 34, - "style": "dark", + "refresh": "", + "schemaVersion": 38, "tags": [], "templating": { "list": [ { - "current": { - "selected": false, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" - }, - "hide": 1, + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(ssv_beacon_status,instance)", + "description": "", + "hide": 0, "includeAll": false, "multi": false, "name": "instance", - "options": [ - { - "selected": false, - "text": "ssv-node-v2-1", - "value": "ssv-node-v2-1" - }, - { - "selected": false, - "text": "ssv-node-v2-2", - "value": "ssv-node-v2-2" - }, - { - "selected": false, - "text": "ssv-node-v2-3", - "value": "ssv-node-v2-3" - }, - { - "selected": false, - "text": "ssv-node-v2-4", - "value": "ssv-node-v2-4" - }, - { - "selected": false, - "text": "ssv-node-v2-5", - "value": "ssv-node-v2-5" - }, - { - "selected": false, - "text": "ssv-node-v2-6", - "value": "ssv-node-v2-6" - }, - { - "selected": false, - "text": "ssv-node-v2-7", - "value": 
"ssv-node-v2-7" - }, - { - "selected": false, - "text": "ssv-node-v2-8", - "value": "ssv-node-v2-8" - }, - { - "selected": false, - "text": "ssv-node-9", - "value": "ssv-node-9" - }, - { - "selected": false, - "text": "ssv-node-10", - "value": "ssv-node-10" - }, - { - "selected": false, - "text": "ssv-node-11", - "value": "ssv-node-11" - }, - { - "selected": false, - "text": "ssv-node-12", - "value": "ssv-node-12" - }, - { - "selected": false, - "text": "ssv-exporter", - "value": "ssv-exporter" - }, - { - "selected": false, - "text": "ssv-exporter-v2", - "value": "ssv-exporter-v2" - }, - { - "selected": true, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" - }, - { - "selected": false, - "text": "ssv-node-v3-2", - "value": "ssv-node-v3-2" - }, - { - "selected": false, - "text": "ssv-node-v3-3", - "value": "ssv-node-v3-3" - }, - { - "selected": false, - "text": "ssv-node-v3-4", - "value": "ssv-node-v3-4" - } - ], - "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-node-9,ssv-node-10,ssv-node-11,ssv-node-12,ssv-exporter,ssv-exporter-v2,ssv-node-v3-1,ssv-node-v3-2,ssv-node-v3-3,ssv-node-v3-4", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(ssv_beacon_status,instance)", + "refId": "instance" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", "queryValue": "", + "refresh": 1, + "regex": "", "skipUrlSync": false, - "type": "custom" + "type": "datasource" } ] }, @@ -7400,7 +7479,7 @@ "timepicker": {}, "timezone": "", "title": "Operator Performance Dashboard", - "uid": "w-fXrOo4k", - "version": 95, + "uid": "operator_performance", + "version": 5, "weekStart": "" } \ No newline at end of file diff --git a/monitoring/metricsreporter/metrics_reporter.go 
b/monitoring/metricsreporter/metrics_reporter.go index 859d46e518..01227e94c6 100644 --- a/monitoring/metricsreporter/metrics_reporter.go +++ b/monitoring/metricsreporter/metrics_reporter.go @@ -4,12 +4,16 @@ import ( "crypto/sha256" "fmt" "strconv" + "time" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/zap" + + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ) // TODO: implement all methods @@ -33,6 +37,10 @@ const ( validatorPending = float64(8) validatorRemoved = float64(9) validatorUnknown = float64(10) + + messageAccepted = "accepted" + messageIgnored = "ignored" + messageRejected = "rejected" ) var ( @@ -65,6 +73,70 @@ var ( Name: "ssv:exporter:operator_index", Help: "operator footprint", }, []string{"pubKey", "index"}) + messageValidationResult = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation", + Help: "Message validation result", + }, []string{"status", "reason", "role", "round"}) + messageValidationSSVType = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation_ssv_type", + Help: "SSV message type", + }, []string{"type"}) + messageValidationConsensusType = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation_consensus_type", + Help: "Consensus message type", + }, []string{"type", "signers"}) + messageValidationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_validation_duration_seconds", + Help: "Message validation duration (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.020, 0.050}, + }, []string{}) + signatureValidationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_signature_validation_duration_seconds", + Help: "Signature validation duration (seconds)", + Buckets: []float64{0.001, 
0.005, 0.010, 0.020, 0.050}, + }, []string{}) + messageSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_size", + Help: "Message size", + Buckets: []float64{100, 500, 1_000, 5_000, 10_000, 50_000, 100_000, 500_000, 1_000_000, 5_000_000}, + }, []string{}) + activeMsgValidation = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv:p2p:pubsub:msg:val:active", + Help: "Count active message validation", + }, []string{"topic"}) + incomingQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_incoming", + Help: "The amount of message incoming to the validator's msg queue", + }, []string{"msg_id"}) + outgoingQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_outgoing", + Help: "The amount of message outgoing from the validator's msg queue", + }, []string{"msg_id"}) + droppedQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_drops", + Help: "The amount of message dropped from the validator's msg queue", + }, []string{"msg_id"}) + messageQueueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv_message_queue_size", + Help: "Size of message queue", + }, []string{}) + messageQueueCapacity = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv_message_queue_capacity", + Help: "Capacity of message queue", + }, []string{}) + messageTimeInQueue = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_time_in_queue_seconds", + Help: "Time message spent in queue (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.050, 0.100, 0.500, 1, 5, 10, 60}, + }, []string{"msg_id"}) + inCommitteeMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_in_committee", + Help: "The amount of messages in committee", + }, []string{"ssv_msg_type", "decided"}) + nonCommitteeMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_non_committee", + Help: "The amount of messages not 
in committee", + }, []string{"ssv_msg_type", "decided"}) ) type MetricsReporter struct { @@ -84,9 +156,26 @@ func New(opts ...Option) *MetricsReporter { allMetrics := []prometheus.Collector{ ssvNodeStatus, executionClientStatus, + executionClientLastFetchedBlock, validatorStatus, eventProcessed, eventProcessingFailed, + operatorIndex, + messageValidationResult, + messageValidationSSVType, + messageValidationConsensusType, + messageValidationDuration, + signatureValidationDuration, + messageSize, + activeMsgValidation, + incomingQueueMessages, + outgoingQueueMessages, + droppedQueueMessages, + messageQueueSize, + messageQueueCapacity, + messageTimeInQueue, + inCommitteeMessages, + nonCommitteeMessages, } for i, c := range allMetrics { @@ -102,77 +191,183 @@ func New(opts ...Option) *MetricsReporter { return &MetricsReporter{} } -func (m MetricsReporter) SSVNodeHealthy() { +func (m *MetricsReporter) SSVNodeHealthy() { ssvNodeStatus.Set(ssvNodeHealthy) } -func (m MetricsReporter) SSVNodeNotHealthy() { +func (m *MetricsReporter) SSVNodeNotHealthy() { ssvNodeStatus.Set(ssvNodeNotHealthy) } -func (m MetricsReporter) ExecutionClientReady() { +func (m *MetricsReporter) ExecutionClientReady() { executionClientStatus.Set(executionClientOK) } -func (m MetricsReporter) ExecutionClientSyncing() { +func (m *MetricsReporter) ExecutionClientSyncing() { executionClientStatus.Set(executionClientSyncing) } -func (m MetricsReporter) ExecutionClientFailure() { +func (m *MetricsReporter) ExecutionClientFailure() { executionClientStatus.Set(executionClientFailure) } -func (m MetricsReporter) ExecutionClientLastFetchedBlock(block uint64) { +func (m *MetricsReporter) ExecutionClientLastFetchedBlock(block uint64) { executionClientLastFetchedBlock.Set(float64(block)) } -func (m MetricsReporter) OperatorPublicKey(operatorID spectypes.OperatorID, publicKey []byte) { +func (m *MetricsReporter) OperatorPublicKey(operatorID spectypes.OperatorID, publicKey []byte) { pkHash := fmt.Sprintf("%x", 
sha256.Sum256(publicKey)) operatorIndex.WithLabelValues(pkHash, strconv.FormatUint(operatorID, 10)).Set(float64(operatorID)) } -func (m MetricsReporter) ValidatorInactive(publicKey []byte) { +func (m *MetricsReporter) ValidatorInactive(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorInactive) } -func (m MetricsReporter) ValidatorNoIndex(publicKey []byte) { +func (m *MetricsReporter) ValidatorNoIndex(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNoIndex) } -func (m MetricsReporter) ValidatorError(publicKey []byte) { +func (m *MetricsReporter) ValidatorError(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorError) } -func (m MetricsReporter) ValidatorReady(publicKey []byte) { +func (m *MetricsReporter) ValidatorReady(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorReady) } -func (m MetricsReporter) ValidatorNotActivated(publicKey []byte) { +func (m *MetricsReporter) ValidatorNotActivated(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNotActivated) } -func (m MetricsReporter) ValidatorExiting(publicKey []byte) { +func (m *MetricsReporter) ValidatorExiting(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorExiting) } -func (m MetricsReporter) ValidatorSlashed(publicKey []byte) { +func (m *MetricsReporter) ValidatorSlashed(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorSlashed) } -func (m MetricsReporter) ValidatorNotFound(publicKey []byte) { +func (m *MetricsReporter) ValidatorNotFound(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNotFound) } -func (m MetricsReporter) ValidatorPending(publicKey []byte) { +func (m *MetricsReporter) ValidatorPending(publicKey []byte) { 
validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorPending) } -func (m MetricsReporter) ValidatorRemoved(publicKey []byte) { +func (m *MetricsReporter) ValidatorRemoved(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorRemoved) } -func (m MetricsReporter) ValidatorUnknown(publicKey []byte) { +func (m *MetricsReporter) ValidatorUnknown(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorUnknown) } -func (m MetricsReporter) EventProcessed(eventName string) { +func (m *MetricsReporter) EventProcessed(eventName string) { eventProcessed.WithLabelValues(eventName).Inc() } -func (m MetricsReporter) EventProcessingFailed(eventName string) { +func (m *MetricsReporter) EventProcessingFailed(eventName string) { eventProcessingFailed.WithLabelValues(eventName).Inc() } // TODO implement -func (m MetricsReporter) LastBlockProcessed(uint64) {} -func (m MetricsReporter) LogsProcessingError(error) {} +func (m *MetricsReporter) LastBlockProcessed(uint64) {} +func (m *MetricsReporter) LogsProcessingError(error) {} + +func (m *MetricsReporter) MessageAccepted( + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageAccepted, + "", + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) MessageIgnored( + reason string, + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageIgnored, + reason, + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) MessageRejected( + reason string, + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageRejected, + reason, + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) SSVMessageType(msgType spectypes.MsgType) { + 
messageValidationSSVType.WithLabelValues(ssvmessage.MsgTypeToString(msgType)).Inc() +} + +func (m *MetricsReporter) ConsensusMsgType(msgType specqbft.MessageType, signers int) { + messageValidationConsensusType.WithLabelValues(ssvmessage.QBFTMsgTypeToString(msgType), strconv.Itoa(signers)).Inc() +} + +func (m *MetricsReporter) MessageValidationDuration(duration time.Duration, labels ...string) { + messageValidationDuration.WithLabelValues(labels...).Observe(duration.Seconds()) +} + +func (m *MetricsReporter) SignatureValidationDuration(duration time.Duration, labels ...string) { + signatureValidationDuration.WithLabelValues(labels...).Observe(duration.Seconds()) +} + +func (m *MetricsReporter) MessageSize(size int) { + messageSize.WithLabelValues().Observe(float64(size)) +} + +func (m *MetricsReporter) ActiveMsgValidation(topic string) { + activeMsgValidation.WithLabelValues(topic).Inc() +} + +func (m *MetricsReporter) ActiveMsgValidationDone(topic string) { + activeMsgValidation.WithLabelValues(topic).Dec() +} + +func (m *MetricsReporter) IncomingQueueMessage(messageID spectypes.MessageID) { + incomingQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) OutgoingQueueMessage(messageID spectypes.MessageID) { + outgoingQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) DroppedQueueMessage(messageID spectypes.MessageID) { + droppedQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) MessageQueueSize(size int) { + messageQueueSize.WithLabelValues().Set(float64(size)) +} + +func (m *MetricsReporter) MessageQueueCapacity(size int) { + messageQueueCapacity.WithLabelValues().Set(float64(size)) +} + +func (m *MetricsReporter) MessageTimeInQueue(messageID spectypes.MessageID, d time.Duration) { + messageTimeInQueue.WithLabelValues(messageID.String()).Observe(d.Seconds()) +} + +func (m *MetricsReporter) InCommitteeMessage(msgType spectypes.MsgType, decided bool) { + str 
:= "non-decided" + if decided { + str = "decided" + } + inCommitteeMessages.WithLabelValues(ssvmessage.MsgTypeToString(msgType), str).Inc() +} + +func (m *MetricsReporter) NonCommitteeMessage(msgType spectypes.MsgType, decided bool) { + str := "non-decided" + if decided { + str = "decided" + } + nonCommitteeMessages.WithLabelValues(ssvmessage.MsgTypeToString(msgType), str).Inc() +} diff --git a/network/commons/common.go b/network/commons/common.go index e9de6ccfb3..b32bf2da4d 100644 --- a/network/commons/common.go +++ b/network/commons/common.go @@ -31,6 +31,35 @@ const ( topicPrefix = "ssv.v2" ) +const ( + signatureSize = 256 + signatureOffset = 0 + operatorIDSize = 8 + operatorIDOffset = signatureOffset + signatureSize + messageOffset = operatorIDOffset + operatorIDSize +) + +// EncodeSignedSSVMessage serializes the message, op id and signature into bytes +func EncodeSignedSSVMessage(message []byte, operatorID spectypes.OperatorID, signature []byte) []byte { + b := make([]byte, signatureSize+operatorIDSize+len(message)) + copy(b[signatureOffset:], signature) + binary.LittleEndian.PutUint64(b[operatorIDOffset:], operatorID) + copy(b[messageOffset:], message) + return b +} + +// DecodeSignedSSVMessage deserializes signed message bytes messsage, op id and a signature +func DecodeSignedSSVMessage(encoded []byte) ([]byte, spectypes.OperatorID, []byte, error) { + if len(encoded) < messageOffset { + return nil, 0, nil, fmt.Errorf("unexpected encoded message size of %d", len(encoded)) + } + + message := encoded[messageOffset:] + operatorID := binary.LittleEndian.Uint64(encoded[operatorIDOffset : operatorIDOffset+operatorIDSize]) + signature := encoded[signatureOffset : signatureOffset+signatureSize] + return message, operatorID, signature, nil +} + // SubnetTopicID returns the topic to use for the given subnet func SubnetTopicID(subnet int) string { if subnet < 0 { @@ -74,6 +103,7 @@ func MsgID() MsgIDFunc { if len(msg) == 0 { return "" } + b := make([]byte, 12) 
binary.LittleEndian.PutUint64(b, xxhash.Sum64(msg)) return string(b) diff --git a/network/commons/keys.go b/network/commons/keys.go index b898e642c8..4525de49fe 100644 --- a/network/commons/keys.go +++ b/network/commons/keys.go @@ -3,6 +3,8 @@ package commons import ( "crypto/ecdsa" crand "crypto/rand" + "crypto/rsa" + "crypto/x509" "math/big" "github.com/btcsuite/btcd/btcec/v2" @@ -12,8 +14,8 @@ import ( "github.com/pkg/errors" ) -// ConvertFromInterfacePrivKey converts crypto.PrivKey back to ecdsa.PrivateKey -func ConvertFromInterfacePrivKey(privkey crypto.PrivKey) (*ecdsa.PrivateKey, error) { +// ECDSAPrivFromInterface converts crypto.PrivKey back to ecdsa.PrivateKey +func ECDSAPrivFromInterface(privkey crypto.PrivKey) (*ecdsa.PrivateKey, error) { secpKey := (privkey.(*crypto.Secp256k1PrivateKey)) rawKey, err := secpKey.Raw() if err != nil { @@ -27,8 +29,8 @@ func ConvertFromInterfacePrivKey(privkey crypto.PrivKey) (*ecdsa.PrivateKey, err return privKey, nil } -// ConvertToInterfacePrivkey converts ecdsa.PrivateKey to crypto.PrivKey -func ConvertToInterfacePrivkey(privkey *ecdsa.PrivateKey) (crypto.PrivKey, error) { +// ECDSAPrivToInterface converts ecdsa.PrivateKey to crypto.PrivKey +func ECDSAPrivToInterface(privkey *ecdsa.PrivateKey) (crypto.PrivKey, error) { privBytes := privkey.D.Bytes() // In the event the number of bytes outputted by the big-int are less than 32, // we append bytes to the start of the sequence for the missing most significant @@ -39,14 +41,14 @@ func ConvertToInterfacePrivkey(privkey *ecdsa.PrivateKey) (crypto.PrivKey, error return crypto.UnmarshalSecp256k1PrivateKey(privBytes) } -// ConvertFromInterfacePubKey converts crypto.PubKey to ecdsa.PublicKey -func ConvertFromInterfacePubKey(pubKey crypto.PubKey) *ecdsa.PublicKey { +// ECDSAPubFromInterface converts crypto.PubKey to ecdsa.PublicKey +func ECDSAPubFromInterface(pubKey crypto.PubKey) *ecdsa.PublicKey { pk := btcec.PublicKey(*(pubKey.(*crypto.Secp256k1PublicKey))) return pk.ToECDSA() 
} -// ConvertToInterfacePubkey converts ecdsa.PublicKey to crypto.PubKey -func ConvertToInterfacePubkey(pubkey *ecdsa.PublicKey) (crypto.PubKey, error) { +// ECDSAPubToInterface converts ecdsa.PublicKey to crypto.PubKey +func ECDSAPubToInterface(pubkey *ecdsa.PublicKey) (crypto.PubKey, error) { xVal, yVal := new(btcec.FieldVal), new(btcec.FieldVal) if xVal.SetByteSlice(pubkey.X.Bytes()) { return nil, errors.Errorf("X value overflows") @@ -61,11 +63,17 @@ func ConvertToInterfacePubkey(pubkey *ecdsa.PublicKey) (crypto.PubKey, error) { return newKey, nil } +// RSAPrivToInterface converts ecdsa.PrivateKey to crypto.PrivKey +func RSAPrivToInterface(privkey *rsa.PrivateKey) (crypto.PrivKey, error) { + rsaPrivDER := x509.MarshalPKCS1PrivateKey(privkey) + return crypto.UnmarshalRsaPrivateKey(rsaPrivDER) +} + // GenNetworkKey generates a new network key func GenNetworkKey() (*ecdsa.PrivateKey, error) { privInterfaceKey, _, err := crypto.GenerateSecp256k1Key(crand.Reader) if err != nil { return nil, errors.WithMessage(err, "could not generate 256k1 key") } - return ConvertFromInterfacePrivKey(privInterfaceKey) + return ECDSAPrivFromInterface(privInterfaceKey) } diff --git a/network/discovery/dv5_service.go b/network/discovery/dv5_service.go index ee35c3f794..98312675ce 100644 --- a/network/discovery/dv5_service.go +++ b/network/discovery/dv5_service.go @@ -3,6 +3,7 @@ package discovery import ( "bytes" "context" + "fmt" "net" "sync/atomic" "time" @@ -13,6 +14,8 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/network/commons" @@ -54,7 +57,8 @@ type DiscV5Service struct { publishState int32 conn *net.UDPConn - subnets []byte + domainType spectypes.DomainType + subnets []byte } func newDiscV5Service(pctx context.Context, logger *zap.Logger, discOpts *Options) (Service, error) { @@ -65,6 +69,7 @@ func newDiscV5Service(pctx 
context.Context, logger *zap.Logger, discOpts *Option publishState: publishStateReady, conns: discOpts.ConnIndex, subnetsIdx: discOpts.SubnetsIdx, + domainType: discOpts.DomainType, subnets: discOpts.DiscV5Opts.Subnets, } @@ -102,7 +107,7 @@ func (dvs *DiscV5Service) Node(logger *zap.Logger, info peer.AddrInfo) (*enode.N if err != nil { return nil, err } - pk := commons.ConvertFromInterfacePubKey(pki) + pk := commons.ECDSAPubFromInterface(pki) id := enode.PubkeyToIDV4(pk) logger = logger.With(zap.String("info", info.String()), zap.String("enode.ID", id.String())) @@ -121,37 +126,54 @@ func (dvs *DiscV5Service) Node(logger *zap.Logger, info peer.AddrInfo) (*enode.N // if we reached peers limit, make sure to accept peers with more than 1 shared subnet, // which lets other components to determine whether we'll want to connect to this node or not. func (dvs *DiscV5Service) Bootstrap(logger *zap.Logger, handler HandleNewPeer) error { - zeroSubnets, _ := records.Subnets{}.FromString(records.ZeroSubnets) + logger = logger.Named(logging.NameDiscoveryService) dvs.discover(dvs.ctx, func(e PeerEvent) { - nodeSubnets, err := records.GetSubnetsEntry(e.Node.Record()) + logger := logger.With( + fields.ENR(e.Node), + fields.PeerID(e.AddrInfo.ID), + ) + err := dvs.checkPeer(logger, e) if err != nil { - logger.Debug("could not read subnets", fields.ENR(e.Node)) - return - } - if bytes.Equal(zeroSubnets, nodeSubnets) { - logger.Debug("skipping zero subnets", fields.ENR(e.Node)) + logger.Debug("discovered peer was dropped", zap.Error(err)) return } - updated := dvs.subnetsIdx.UpdatePeerSubnets(e.AddrInfo.ID, nodeSubnets) - if updated { - logger.Debug("[discv5] peer subnets were updated", fields.ENR(e.Node), - fields.PeerID(e.AddrInfo.ID), - fields.Subnets(records.Subnets(nodeSubnets))) - } - if !dvs.limitNodeFilter(e.Node) { - if !dvs.sharedSubnetsFilter(1)(e.Node) { - metricRejectedNodes.Inc() - return - } - } - metricFoundNodes.Inc() handler(e) }, defaultDiscoveryInterval) // , 
dvs.forkVersionFilter) //, dvs.badNodeFilter) return nil } +var zeroSubnets, _ = records.Subnets{}.FromString(records.ZeroSubnets) + +func (dvs *DiscV5Service) checkPeer(logger *zap.Logger, e PeerEvent) error { + // Get the peer's domain type, skipping if it mismatches ours. + // TODO: uncomment errors once there are sufficient nodes with domain type. + nodeDomainType, err := records.GetDomainTypeEntry(e.Node.Record()) + if err != nil { + // TODO: skip missing domain type (likely old node). + } else if nodeDomainType != dvs.domainType { + // TODO: skip different domain type. + } + + // Get the peer's subnets, skipping if it has none. + nodeSubnets, err := records.GetSubnetsEntry(e.Node.Record()) + if err != nil { + return fmt.Errorf("could not read subnets: %w", err) + } + if bytes.Equal(zeroSubnets, nodeSubnets) { + return errors.New("zero subnets") + } + + dvs.subnetsIdx.UpdatePeerSubnets(e.AddrInfo.ID, nodeSubnets) + if !dvs.limitNodeFilter(e.Node) && !dvs.sharedSubnetsFilter(1)(e.Node) { + metricRejectedNodes.Inc() + return errors.New("no shared subnets") + } + metricFoundNodes.Inc() + return nil +} + // initDiscV5Listener creates a new listener and starts it func (dvs *DiscV5Service) initDiscV5Listener(logger *zap.Logger, discOpts *Options) error { opts := discOpts.DiscV5Opts @@ -184,7 +206,7 @@ func (dvs *DiscV5Service) initDiscV5Listener(logger *zap.Logger, discOpts *Optio dvs.bootnodes = dv5Cfg.Bootnodes logger.Debug("started discv5 listener (UDP)", fields.BindIP(bindIP), - zap.Int("UdpPort", opts.Port), fields.ENRLocalNode(localNode), fields.OperatorIDStr(opts.OperatorID)) + zap.Int("UdpPort", opts.Port), fields.ENRLocalNode(localNode), fields.Domain(discOpts.DomainType)) return nil } @@ -306,31 +328,22 @@ func (dvs *DiscV5Service) createLocalNode(logger *zap.Logger, discOpts *Options, if err != nil { return nil, errors.Wrap(err, "could not add configured addresses") } - err = DecorateNode(localNode, map[string]interface{}{ - "operatorID": opts.OperatorID, 
- "subnets": opts.Subnets, - }) + err = DecorateNode( + localNode, + + // Satisfy decorations of forks supported by this node. + DecorateWithDomainType(dvs.domainType), + DecorateWithSubnets(opts.Subnets), + ) if err != nil { return nil, errors.Wrap(err, "could not decorate local node") } - logger.Debug("node record is ready", fields.ENRLocalNode(localNode), fields.OperatorIDStr(opts.OperatorID), fields.Subnets(opts.Subnets)) + logger.Debug("node record is ready", fields.ENRLocalNode(localNode), fields.Domain(dvs.domainType), fields.Subnets(opts.Subnets)) return localNode, nil } -// DecorateNode will enrich the local node record with more entries, according to current fork -func DecorateNode(node *enode.LocalNode, args map[string]interface{}) error { - var subnets []byte - raw, ok := args["subnets"] - if !ok { - subnets = make([]byte, commons.Subnets()) - } else { - subnets = raw.([]byte) - } - return records.SetSubnetsEntry(node, subnets) -} - // newUDPListener creates a udp server func newUDPListener(bindIP net.IP, port int, network string) (*net.UDPConn, error) { udpAddr := &net.UDPAddr{ diff --git a/network/discovery/dv5_service_test.go b/network/discovery/dv5_service_test.go new file mode 100644 index 0000000000..7e0ed5c3ba --- /dev/null +++ b/network/discovery/dv5_service_test.go @@ -0,0 +1,140 @@ +package discovery + +import ( + "context" + "net" + "os" + "testing" + + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/network/peers" + "github.com/bloxapp/ssv/network/peers/connections/mock" + "github.com/bloxapp/ssv/network/records" + "github.com/bloxapp/ssv/utils" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +func TestCheckPeer(t *testing.T) { + var ( + ctx = context.Background() + logger = zap.NewNop() + myDomainType = spectypes.DomainType{0x1, 0x2, 0x3, 0x4} + mySubnets = mockSubnets(1, 2, 3) + tests = 
[]*checkPeerTest{ + { + name: "valid", + domainType: &myDomainType, + subnets: mySubnets, + expectedError: nil, + }, + { + name: "missing domain type", + domainType: nil, + subnets: mySubnets, + expectedError: nil, + }, + { + name: "different domain type", + domainType: &spectypes.DomainType{0x1, 0x2, 0x3, 0x5}, + subnets: mySubnets, + expectedError: nil, + }, + { + name: "missing subnets", + domainType: &myDomainType, + subnets: nil, + expectedError: errors.New("could not read subnets"), + }, + { + name: "inactive subnets", + domainType: &myDomainType, + subnets: mockSubnets(), + expectedError: errors.New("zero subnets"), + }, + { + name: "no shared subnets", + domainType: &myDomainType, + subnets: mockSubnets(0, 4, 5), + expectedError: errors.New("no shared subnets"), + }, + { + name: "one shared subnet", + domainType: &myDomainType, + subnets: mockSubnets(0, 1, 4), + expectedError: nil, + }, + { + name: "two shared subnets", + domainType: &myDomainType, + subnets: mockSubnets(0, 1, 2), + expectedError: nil, + }, + } + ) + + // Create the LocalNode instances for the tests. + for _, test := range tests { + // Create a random network key. + priv, err := utils.ECDSAPrivateKey(logger, "") + require.NoError(t, err) + + // Create a temporary directory for storage. + tempDir := t.TempDir() + defer os.RemoveAll(tempDir) + + localNode, err := records.CreateLocalNode(priv, tempDir, net.ParseIP("127.0.0.1"), 12000, 13000) + require.NoError(t, err) + + if test.domainType != nil { + err := records.SetDomainTypeEntry(localNode, *test.domainType) + require.NoError(t, err) + } + if test.subnets != nil { + err := records.SetSubnetsEntry(localNode, test.subnets) + require.NoError(t, err) + } + + test.localNode = localNode + } + + // Run the tests. 
+ subnetIndex := peers.NewSubnetsIndex(commons.Subnets()) + dvs := &DiscV5Service{ + ctx: ctx, + conns: &mock.MockConnectionIndex{LimitValue: true}, + subnetsIdx: subnetIndex, + domainType: myDomainType, + subnets: mySubnets, + } + + for _, test := range tests { + err := dvs.checkPeer(logger, PeerEvent{ + Node: test.localNode.Node(), + }) + if test.expectedError != nil { + require.ErrorContains(t, err, test.expectedError.Error(), test.name) + } else { + require.NoError(t, err, test.name) + } + } +} + +type checkPeerTest struct { + name string + domainType *spectypes.DomainType + subnets []byte + localNode *enode.LocalNode + expectedError error +} + +func mockSubnets(active ...int) []byte { + subnets := make([]byte, commons.Subnets()) + for _, subnet := range active { + subnets[subnet] = 1 + } + return subnets +} diff --git a/network/discovery/enode.go b/network/discovery/enode.go index 351aed40cd..0027ed4cbc 100644 --- a/network/discovery/enode.go +++ b/network/discovery/enode.go @@ -6,12 +6,13 @@ import ( "fmt" "net" - "github.com/bloxapp/ssv/network/commons" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" + + "github.com/bloxapp/ssv/network/commons" ) // createLocalNode create a new enode.LocalNode instance @@ -69,7 +70,7 @@ func ToPeer(node *enode.Node) (*peer.AddrInfo, error) { // PeerID returns the peer id of the node func PeerID(node *enode.Node) (peer.ID, error) { - pk, err := commons.ConvertToInterfacePubkey(node.Pubkey()) + pk, err := commons.ECDSAPubToInterface(node.Pubkey()) if err != nil { return "", err } diff --git a/network/discovery/enode_test.go b/network/discovery/enode_test.go index 7f7c9a12cb..bcf273548c 100644 --- a/network/discovery/enode_test.go +++ b/network/discovery/enode_test.go @@ -43,7 +43,7 @@ func Test_ParseENR(t *testing.T) { func localNodeMock(t *testing.T) *enode.LocalNode { sk, _, err := 
crypto.GenerateSecp256k1Key(crand.Reader) require.NoError(t, err) - pk, err := commons.ConvertFromInterfacePrivKey(sk) + pk, err := commons.ECDSAPrivFromInterface(sk) require.NoError(t, err) ip, err := commons.IPAddr() require.NoError(t, err) diff --git a/network/discovery/node_record.go b/network/discovery/node_record.go new file mode 100644 index 0000000000..d0e4d328b7 --- /dev/null +++ b/network/discovery/node_record.go @@ -0,0 +1,33 @@ +package discovery + +import ( + "fmt" + + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/network/records" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +type NodeRecordDecoration func(*enode.LocalNode) error + +func DecorateWithDomainType(domainType spectypes.DomainType) NodeRecordDecoration { + return func(node *enode.LocalNode) error { + return records.SetDomainTypeEntry(node, domainType) + } +} + +func DecorateWithSubnets(subnets []byte) NodeRecordDecoration { + return func(node *enode.LocalNode) error { + return records.SetSubnetsEntry(node, subnets) + } +} + +// DecorateNode will enrich the local node record with more entries, according to current fork +func DecorateNode(node *enode.LocalNode, decorations ...NodeRecordDecoration) error { + for _, decoration := range decorations { + if err := decoration(node); err != nil { + return fmt.Errorf("failed to decorate node record: %w", err) + } + } + return nil +} diff --git a/network/discovery/options.go b/network/discovery/options.go index 0902d78514..63e3614f70 100644 --- a/network/discovery/options.go +++ b/network/discovery/options.go @@ -32,8 +32,6 @@ type DiscV5Options struct { Bootnodes []string // Subnets is a bool slice represents all the subnets the node is intreseted in Subnets []byte - // OperatorID is the operator id (optional) - OperatorID string // EnableLogging when true enables logs to be emitted EnableLogging bool } diff --git a/network/discovery/service.go b/network/discovery/service.go index a44f910828..1d2da89815 100644 --- 
a/network/discovery/service.go +++ b/network/discovery/service.go @@ -10,6 +10,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/zap" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv/network/peers" ) @@ -39,6 +40,9 @@ type Options struct { SubnetsIdx peers.SubnetsIndex HostAddress string HostDNS string + + // DomainType is the SSV network domain of the node + DomainType spectypes.DomainType } // Service is the interface for discovery diff --git a/network/network.go b/network/network.go index 67af7476fb..f40678892c 100644 --- a/network/network.go +++ b/network/network.go @@ -1,19 +1,19 @@ package network import ( + "context" "io" "go.uber.org/zap" - spectypes "github.com/bloxapp/ssv-spec/types" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) // MessageRouter is accepting network messages and route them to the corresponding (internal) components type MessageRouter interface { // Route routes the given message, this function MUST NOT block - Route(logger *zap.Logger, message spectypes.SSVMessage) + Route(ctx context.Context, message *queue.DecodedSSVMessage) } // MessageRouting allows to register a MessageRouter diff --git a/network/p2p/config.go b/network/p2p/config.go index 77f1e599b5..3880b52311 100644 --- a/network/p2p/config.go +++ b/network/p2p/config.go @@ -3,17 +3,23 @@ package p2pv1 import ( "context" "crypto/ecdsa" + "crypto/rsa" "fmt" "strings" "time" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/security/noise" libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "go.uber.org/zap" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/monitoring/metricsreporter" "github.com/bloxapp/ssv/network" 
"github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/networkconfig" @@ -52,8 +58,12 @@ type Config struct { DiscoveryTrace bool `yaml:"DiscoveryTrace" env:"DISCOVERY_TRACE" env-description:"Flag to turn on/off discovery tracing in logs"` // NetworkPrivateKey is used for network identity, MUST be injected NetworkPrivateKey *ecdsa.PrivateKey - // OperatorPublicKey is used for operator identity, optional - OperatorID string + // OperatorPrivateKey is used for operator identity, MUST be injected + OperatorPrivateKey *rsa.PrivateKey + // OperatorPubKeyHash is hash of operator public key, used for identity, optional + OperatorPubKeyHash string + // OperatorID contains numeric operator ID + OperatorID func() spectypes.OperatorID // Router propagate incoming network messages to the responsive components Router network.MessageRouter // UserAgent to use by libp2p identify protocol @@ -62,6 +72,10 @@ type Config struct { NodeStorage storage.Storage // Network defines a network configuration. Network networkconfig.NetworkConfig + // MessageValidator validates incoming messages. + MessageValidator validation.MessageValidator + // Metrics report metrics. 
+ Metrics *metricsreporter.MetricsReporter PubsubMsgCacheTTL time.Duration `yaml:"PubsubMsgCacheTTL" env:"PUBSUB_MSG_CACHE_TTL" env-description:"How long a message ID will be remembered as seen"` PubsubOutQueueSize int `yaml:"PubsubOutQueueSize" env:"PUBSUB_OUT_Q_SIZE" env-description:"The size that we assign to the outbound pubsub message queue"` @@ -74,12 +88,13 @@ type Config struct { GetValidatorStats network.GetValidatorStats - PermissionedActivateEpoch uint64 `yaml:"PermissionedActivateEpoch" env:"PERMISSIONED_ACTIVE_EPOCH" env-default:"0" env-description:"On which epoch to start only accepting peers that are operators registered in the contract"` - PermissionedDeactivateEpoch uint64 `yaml:"PermissionedDeactivateEpoch" env:"PERMISSIONED_DEACTIVE_EPOCH" env-default:"99999999999999" env-description:"On which epoch to start accepting operators all peers"` - Permissioned func() bool // this is not loaded from config file but set up in full node setup - // WhitelistedOperatorKeys is an array of Operator Public Key PEMs not registered in the contract with which the node will accept connections - WhitelistedOperatorKeys []string `yaml:"WhitelistedOperatorKeys" env:"WHITELISTED_KEYS" env-description:"Operators' keys not registered in the contract with which the node will accept connections"` + + // PeerScoreInspector is called periodically to inspect the peer scores. + PeerScoreInspector func(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) + + // PeerScoreInspectorInterval is the interval at which the PeerScoreInspector is called. 
+ PeerScoreInspectorInterval time.Duration } // Libp2pOptions creates options list for the libp2p host @@ -89,7 +104,7 @@ func (c *Config) Libp2pOptions(logger *zap.Logger) ([]libp2p.Option, error) { if c.NetworkPrivateKey == nil { return nil, errors.New("could not create options w/o network key") } - sk, err := commons.ConvertToInterfacePrivkey(c.NetworkPrivateKey) + sk, err := commons.ECDSAPrivToInterface(c.NetworkPrivateKey) if err != nil { return nil, errors.Wrap(err, "could not convert to interface priv key") } diff --git a/network/p2p/metrics.go b/network/p2p/metrics.go index 843374774b..10ba41304e 100644 --- a/network/p2p/metrics.go +++ b/network/p2p/metrics.go @@ -32,7 +32,7 @@ var ( metricsRouterIncoming = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:network:router:in", Help: "Counts incoming messages", - }, []string{"identifier", "mt"}) + }, []string{"mt"}) ) func init() { @@ -105,8 +105,8 @@ func (n *p2pNetwork) reportPeerIdentity(logger *zap.Logger, pid peer.ID) { } } - if pubKey, ok := n.operatorPKCache.Load(opPKHash); ok { - operatorData, found, opDataErr := n.nodeStorage.GetOperatorDataByPubKey(nil, pubKey.([]byte)) + if pubKey, ok := n.operatorPKHashToPKCache.Get(opPKHash); ok { + operatorData, found, opDataErr := n.nodeStorage.GetOperatorDataByPubKey(nil, pubKey) if opDataErr == nil && found { opID = strconv.FormatUint(operatorData.ID, 10) } @@ -118,7 +118,7 @@ func (n *p2pNetwork) reportPeerIdentity(logger *zap.Logger, pid peer.ID) { for _, operator := range operators { pubKeyHash := format.OperatorID(operator.PublicKey) - n.operatorPKCache.Store(pubKeyHash, operator.PublicKey) + n.operatorPKHashToPKCache.Set(pubKeyHash, operator.PublicKey) if pubKeyHash == opPKHash { opID = strconv.FormatUint(operator.ID, 10) } diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 4f27098061..9ee5c04126 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -2,29 +2,28 @@ package p2pv1 import ( "context" - "sync" + "crypto/rsa" 
"sync/atomic" "time" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/network/commons" - connmgrcore "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" libp2pdiscbackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" "github.com/bloxapp/ssv/network/peers" "github.com/bloxapp/ssv/network/peers/connections" "github.com/bloxapp/ssv/network/records" "github.com/bloxapp/ssv/network/streams" - "github.com/bloxapp/ssv/network/syncing" "github.com/bloxapp/ssv/network/topics" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/utils/async" @@ -40,7 +39,7 @@ const ( ) const ( - connManagerGCInterval = time.Minute + connManagerGCInterval = 3 * time.Minute connManagerGCTimeout = time.Minute peersReportingInterval = 60 * time.Second peerIdentitiesReportingInterval = 5 * time.Minute @@ -56,14 +55,15 @@ type p2pNetwork struct { interfaceLogger *zap.Logger // struct logger to log in interface methods that do not accept a logger cfg *Config - host host.Host - streamCtrl streams.StreamController - idx peers.Index - disc discovery.Service - topicsCtrl topics.Controller - msgRouter network.MessageRouter - msgResolver topics.MsgPeersResolver - connHandler connections.ConnHandler + host host.Host + streamCtrl streams.StreamController + idx peers.Index + disc discovery.Service + topicsCtrl topics.Controller + msgRouter network.MessageRouter + msgResolver topics.MsgPeersResolver + msgValidator validation.MessageValidator + connHandler connections.ConnHandler state int32 @@ -72,9 +72,11 @@ type 
p2pNetwork struct { backoffConnector *libp2pdiscbackoff.BackoffConnector subnets []byte libConnManager connmgrcore.ConnManager - syncer syncing.Syncer - nodeStorage operatorstorage.Storage - operatorPKCache sync.Map + + nodeStorage operatorstorage.Storage + operatorPKHashToPKCache *hashmap.Map[string, []byte] // used for metrics + operatorPrivateKey *rsa.PrivateKey + operatorID func() spectypes.OperatorID } // New creates a new p2p network @@ -84,16 +86,19 @@ func New(logger *zap.Logger, cfg *Config) network.P2PNetwork { logger = logger.Named(logging.NameP2PNetwork) return &p2pNetwork{ - parentCtx: cfg.Ctx, - ctx: ctx, - cancel: cancel, - interfaceLogger: logger, - cfg: cfg, - msgRouter: cfg.Router, - state: stateClosed, - activeValidators: hashmap.New[string, validatorStatus](), - nodeStorage: cfg.NodeStorage, - operatorPKCache: sync.Map{}, + parentCtx: cfg.Ctx, + ctx: ctx, + cancel: cancel, + interfaceLogger: logger, + cfg: cfg, + msgRouter: cfg.Router, + msgValidator: cfg.MessageValidator, + state: stateClosed, + activeValidators: hashmap.New[string, validatorStatus](), + nodeStorage: cfg.NodeStorage, + operatorPKHashToPKCache: hashmap.New[string, []byte](), + operatorPrivateKey: cfg.OperatorPrivateKey, + operatorID: cfg.OperatorID, } } @@ -160,22 +165,19 @@ func (n *p2pNetwork) Start(logger *zap.Logger) error { go n.startDiscovery(logger) async.Interval(n.ctx, connManagerGCInterval, n.peersBalancing(logger)) + // don't report metrics in tests + if n.cfg.Metrics != nil { + async.Interval(n.ctx, peersReportingInterval, n.reportAllPeers(logger)) - async.Interval(n.ctx, peersReportingInterval, n.reportAllPeers(logger)) + async.Interval(n.ctx, peerIdentitiesReportingInterval, n.reportPeerIdentities(logger)) - async.Interval(n.ctx, peerIdentitiesReportingInterval, n.reportPeerIdentities(logger)) - - async.Interval(n.ctx, topicsReportingInterval, n.reportTopics(logger)) + async.Interval(n.ctx, topicsReportingInterval, n.reportTopics(logger)) + } if err := 
n.subscribeToSubnets(logger); err != nil { return err } - // Create & start ConcurrentSyncer. - syncer := syncing.NewConcurrent(n.ctx, syncing.New(n), 16, syncing.DefaultTimeouts, nil) - go syncer.Run(logger) - n.syncer = syncer - return nil } diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go index 708deb79d3..1ec65082a6 100644 --- a/network/p2p/p2p_pubsub.go +++ b/network/p2p/p2p_pubsub.go @@ -1,9 +1,15 @@ package p2pv1 import ( + "context" + "crypto" + "crypto/rsa" + "crypto/sha256" "encoding/hex" "fmt" + "github.com/bloxapp/ssv/protocol/v2/message" + spectypes "github.com/bloxapp/ssv-spec/types" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" @@ -11,12 +17,11 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/records" - - "github.com/bloxapp/ssv/network" - "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) type validatorStatus int @@ -52,16 +57,27 @@ func (n *p2pNetwork) Broadcast(msg *spectypes.SSVMessage) error { return p2pprotocol.ErrNetworkIsNotReady } - raw, err := commons.EncodeNetworkMsg(msg) + encodedMsg, err := commons.EncodeNetworkMsg(msg) if err != nil { return errors.Wrap(err, "could not decode msg") } + if n.cfg.Network.Beacon.EstimatedCurrentEpoch() > n.cfg.Network.PermissionlessActivationEpoch { + hash := sha256.Sum256(encodedMsg) + + signature, err := rsa.SignPKCS1v15(nil, n.operatorPrivateKey, crypto.SHA256, hash[:]) + if err != nil { + return err + } + + encodedMsg = commons.EncodeSignedSSVMessage(encodedMsg, n.operatorID(), signature) + } + vpk := msg.GetID().GetPubKey() topics := commons.ValidatorTopicID(vpk) for _, topic := range topics { - if err := n.topicsCtrl.Broadcast(topic, raw, n.cfg.RequestTimeout); err != nil { + if err := n.topicsCtrl.Broadcast(topic, 
encodedMsg, n.cfg.RequestTimeout); err != nil { n.interfaceLogger.Debug("could not broadcast msg", fields.PubKey(vpk), zap.Error(err)) return errors.Wrap(err, "could not broadcast msg") } @@ -133,8 +149,8 @@ func (n *p2pNetwork) subscribe(logger *zap.Logger, pk spectypes.ValidatorPK) err } // handleIncomingMessages reads messages from the given channel and calls the router, note that this function blocks. -func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(topic string, msg *pubsub.Message) error { - return func(topic string, msg *pubsub.Message) error { +func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(ctx context.Context, topic string, msg *pubsub.Message) error { + return func(ctx context.Context, topic string, msg *pubsub.Message) error { if n.msgRouter == nil { logger.Debug("msg router is not configured") return nil @@ -143,26 +159,28 @@ func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(topic string, return nil } - var ssvMsg *spectypes.SSVMessage + var decodedMsg *queue.DecodedSSVMessage if msg.ValidatorData != nil { - m, ok := msg.ValidatorData.(spectypes.SSVMessage) + m, ok := msg.ValidatorData.(*queue.DecodedSSVMessage) if ok { - ssvMsg = &m + decodedMsg = m } } - if ssvMsg == nil { + if decodedMsg == nil { return errors.New("message was not decoded") } - p2pID := ssvMsg.GetID().String() + //p2pID := decodedMsg.GetID().String() // logger.With( // zap.String("pubKey", hex.EncodeToString(ssvMsg.MsgID.GetPubKey())), // zap.String("role", ssvMsg.MsgID.GetRoleType().String()), // ).Debug("handlePubsubMessages") - metricsRouterIncoming.WithLabelValues(p2pID, message.MsgTypeToString(ssvMsg.MsgType)).Inc() - n.msgRouter.Route(logger, *ssvMsg) + metricsRouterIncoming.WithLabelValues(message.MsgTypeToString(decodedMsg.MsgType)).Inc() + + n.msgRouter.Route(ctx, decodedMsg) + return nil } } diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index 8ffe70656b..f680096f8b 100644 --- 
a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -161,7 +161,7 @@ func (n *p2pNetwork) setupStreamCtrl(logger *zap.Logger) error { } func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { - libPrivKey, err := p2pcommons.ConvertToInterfacePrivkey(n.cfg.NetworkPrivateKey) + libPrivKey, err := p2pcommons.ECDSAPrivToInterface(n.cfg.NetworkPrivateKey) if err != nil { return err } @@ -169,7 +169,6 @@ func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { domain := "0x" + hex.EncodeToString(n.cfg.Network.Domain[:]) self := records.NewNodeInfo(domain) self.Metadata = &records.NodeMetadata{ - OperatorID: n.cfg.OperatorID, NodeVersion: commons.GetNodeVersion(), Subnets: records.Subnets(n.subnets).String(), } @@ -204,7 +203,7 @@ func (n *p2pNetwork) setupPeerServices(logger *zap.Logger) error { filters = append(filters, connections.SenderRecipientIPsCheckFilter(n.host.ID()), connections.SignatureCheckFilter(), - connections.RegisteredOperatorsFilter(n.nodeStorage, n.cfg.WhitelistedOperatorKeys)) + connections.RegisteredOperatorsFilter(n.nodeStorage, n.cfg.Network.WhitelistedOperatorKeys)) } return filters } @@ -246,7 +245,6 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error { TCPPort: n.cfg.TCPPort, NetworkKey: n.cfg.NetworkPrivateKey, Bootnodes: n.cfg.TransformBootnodes(), - OperatorID: n.cfg.OperatorID, EnableLogging: n.cfg.DiscoveryTrace, } if len(n.subnets) > 0 { @@ -263,6 +261,7 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error { SubnetsIdx: n.idx, HostAddress: n.cfg.HostAddress, HostDNS: n.cfg.HostDNS, + DomainType: n.cfg.Network.Domain, } disc, err := discovery.NewService(n.ctx, logger, discOpts) if err != nil { @@ -276,14 +275,12 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error { } func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { - cfg := &topics.PububConfig{ - Host: n.host, - TraceLog: n.cfg.PubSubTrace, - MsgValidatorFactory: func(s string) topics.MsgValidatorFunc { - return 
topics.NewSSVMsgValidator() - }, - MsgHandler: n.handlePubsubMessages(logger), - ScoreIndex: n.idx, + cfg := &topics.PubSubConfig{ + Host: n.host, + TraceLog: n.cfg.PubSubTrace, + MsgValidator: n.msgValidator, + MsgHandler: n.handlePubsubMessages(logger), + ScoreIndex: n.idx, //Discovery: n.disc, OutboundQueueSize: n.cfg.PubsubOutQueueSize, ValidationQueueSize: n.cfg.PubsubValidationQueueSize, @@ -292,20 +289,27 @@ func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { GetValidatorStats: n.cfg.GetValidatorStats, } + if n.cfg.PeerScoreInspector != nil && n.cfg.PeerScoreInspectorInterval > 0 { + cfg.ScoreInspector = n.cfg.PeerScoreInspector + cfg.ScoreInspectorInterval = n.cfg.PeerScoreInspectorInterval + } + if !n.cfg.PubSubScoring { cfg.ScoreIndex = nil } - midHandler := topics.NewMsgIDHandler(n.ctx, time.Minute*2) + midHandler := topics.NewMsgIDHandler(n.ctx, time.Minute*2, n.cfg.Network) n.msgResolver = midHandler cfg.MsgIDHandler = midHandler go cfg.MsgIDHandler.Start() // run GC every 3 minutes to clear old messages async.RunEvery(n.ctx, time.Minute*3, midHandler.GC) - _, tc, err := topics.NewPubsub(n.ctx, logger, cfg) + + _, tc, err := topics.NewPubSub(n.ctx, logger, cfg) if err != nil { return errors.Wrap(err, "could not setup pubsub") } + n.topicsCtrl = tc logger.Debug("topics controller is ready") return nil diff --git a/network/p2p/p2p_sync.go b/network/p2p/p2p_sync.go index 6b810c7d41..73c4b443b3 100644 --- a/network/p2p/p2p_sync.go +++ b/network/p2p/p2p_sync.go @@ -1,143 +1,25 @@ package p2pv1 import ( - "context" "encoding/hex" "fmt" "math/rand" "time" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/network/commons" - - "github.com/multiformats/go-multistream" - - "github.com/bloxapp/ssv-spec/qbft" - specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" libp2p_protocol 
"github.com/libp2p/go-libp2p/core/protocol" + "github.com/multiformats/go-multistream" "github.com/pkg/errors" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" ) -func (n *p2pNetwork) SyncHighestDecided(mid spectypes.MessageID) error { - return n.syncer.SyncHighestDecided(context.Background(), n.interfaceLogger, mid, func(msg spectypes.SSVMessage) { - n.msgRouter.Route(n.interfaceLogger, msg) - }) -} - -func (n *p2pNetwork) SyncDecidedByRange(mid spectypes.MessageID, from, to qbft.Height) { - if !n.cfg.FullNode { - return - } - // TODO: uncomment to fix syncing bug! - // if from < to { - // n.logger.Warn("failed to sync decided by range: from is greater than to", - // zap.String("pubkey", hex.EncodeToString(mid.GetPubKey())), - // zap.String("role", mid.GetRoleType().String()), - // zap.Uint64("from", uint64(from)), - // zap.Uint64("to", uint64(to))) - // return - // } - if to > from { - n.interfaceLogger.Warn("failed to sync decided by range: to is higher than from", - zap.Uint64("from", uint64(from)), - zap.Uint64("to", uint64(to))) - return - } - - // TODO: this is a temporary solution to prevent syncing already decided heights. - // Example: Say we received a decided at height 99, and right after we received a decided at height 100 - // before we could advance the controller's height. This would cause the controller to call SyncDecidedByRange. - // However, height 99 is already synced, so temporarily we reject such requests here. - // Note: This isn't ideal because sometimes you do want to sync gaps of 1. 
- const minGap = 2 - if to-from < minGap { - return - } - - err := n.syncer.SyncDecidedByRange(context.Background(), n.interfaceLogger, mid, from, to, func(msg spectypes.SSVMessage) { - n.msgRouter.Route(n.interfaceLogger, msg) - }) - if err != nil { - n.interfaceLogger.Error("failed to sync decided by range", zap.Error(err)) - } -} - -// LastDecided fetches last decided from a random set of peers -func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]p2pprotocol.SyncResult, error) { - const ( - minPeers = 3 - waitTime = time.Second * 24 - ) - if !n.isReady() { - return nil, p2pprotocol.ErrNetworkIsNotReady - } - pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) - peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) - if err != nil { - return nil, errors.Wrap(err, "could not get subset of peers") - } - return n.makeSyncRequest(logger, peers, mid, pid, &message.SyncMessage{ - Params: &message.SyncParams{ - Identifier: mid, - }, - Protocol: message.LastDecidedType, - }) -} - -// GetHistory sync the given range from a set of peers that supports history for the given identifier -func (n *p2pNetwork) GetHistory(logger *zap.Logger, mid spectypes.MessageID, from, to specqbft.Height, targets ...string) ([]p2pprotocol.SyncResult, specqbft.Height, error) { - if from >= to { - return nil, 0, nil - } - - if !n.isReady() { - return nil, 0, p2pprotocol.ErrNetworkIsNotReady - } - protocolID, peerCount := commons.ProtocolID(p2pprotocol.DecidedHistoryProtocol) - peers := make([]peer.ID, 0) - for _, t := range targets { - p, err := peer.Decode(t) - if err != nil { - continue - } - peers = append(peers, p) - } - // if no peers were provided -> select a random set of peers - if len(peers) == 0 { - random, err := n.getSubsetOfPeers(logger, mid.GetPubKey(), peerCount, n.peersWithProtocolsFilter(protocolID)) - if err != nil { - return nil, 0, errors.Wrap(err, "could not get 
subset of peers") - } - peers = random - } - maxBatchRes := specqbft.Height(n.cfg.MaxBatchResponse) - - var results []p2pprotocol.SyncResult - var err error - currentEnd := to - if to-from > maxBatchRes { - currentEnd = from + maxBatchRes - } - results, err = n.makeSyncRequest(logger, peers, mid, protocolID, &message.SyncMessage{ - Params: &message.SyncParams{ - Height: []specqbft.Height{from, currentEnd}, - Identifier: mid, - }, - Protocol: message.DecidedHistoryType, - }) - if err != nil { - return results, 0, err - } - return results, currentEnd, nil -} - // RegisterHandlers registers the given handlers func (n *p2pNetwork) RegisterHandlers(logger *zap.Logger, handlers ...*p2pprotocol.SyncHandler) { m := make(map[libp2p_protocol.ID][]p2pprotocol.RequestHandler) @@ -175,21 +57,26 @@ func (n *p2pNetwork) handleStream(logger *zap.Logger, handler p2pprotocol.Reques if err != nil { return errors.Wrap(err, "could not handle stream") } + smsg, err := commons.DecodeNetworkMsg(req) if err != nil { return errors.Wrap(err, "could not decode msg from stream") } + result, err := handler(smsg) if err != nil { return errors.Wrap(err, "could not handle msg from stream") } + resultBytes, err := commons.EncodeNetworkMsg(result) if err != nil { return errors.Wrap(err, "could not encode msg") } + if err := respond(resultBytes); err != nil { return errors.Wrap(err, "could not respond to stream") } + return nil } } @@ -232,6 +119,7 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp if err != nil { return nil, errors.Wrap(err, "could not encode sync message") } + msg := &spectypes.SSVMessage{ MsgType: message.SSVSyncMsgType, MsgID: mid, @@ -241,11 +129,13 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp if err != nil { return nil, err } + logger = logger.With(zap.String("protocol", string(protocol))) msgID := commons.MsgID() distinct := make(map[string]struct{}) for _, pid := range peers { logger := 
logger.With(fields.PeerID(pid)) + raw, err := n.streamCtrl.Request(logger, pid, protocol, encoded) if err != nil { // TODO: is this how to check for ErrNotSupported? @@ -255,16 +145,28 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp } continue } + + if n.cfg.Network.Beacon.EstimatedCurrentEpoch() > n.cfg.Network.PermissionlessActivationEpoch { + decodedMsg, _, _, err := commons.DecodeSignedSSVMessage(raw) + if err != nil { + logger.Debug("could not decode signed SSV message", zap.Error(err)) + } else { + raw = decodedMsg + } + } + mid := msgID(raw) if _, ok := distinct[mid]; ok { continue } distinct[mid] = struct{}{} + res, err := commons.DecodeNetworkMsg(raw) if err != nil { logger.Debug("could not decode stream response", zap.Error(err)) continue } + results = append(results, p2pprotocol.SyncResult{ Msg: res, Sender: pid.String(), @@ -274,6 +176,8 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp } // peersWithProtocolsFilter is used to accept peers that supports the given protocols +// +//nolint:unused func (n *p2pNetwork) peersWithProtocolsFilter(protocols ...libp2p_protocol.ID) func(peer.ID) bool { return func(id peer.ID) bool { supported, err := n.host.Network().Peerstore().SupportsProtocols(id, protocols...) 
diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index 9fc132d0ff..208a8bed4a 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -2,13 +2,24 @@ package p2pv1 import ( "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" "encoding/hex" + "encoding/pem" + "fmt" "sync" "sync/atomic" "testing" "time" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" @@ -18,10 +29,57 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/network" - protcolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/protocol/v2/types" + p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" ) +func TestRSAUsage(t *testing.T) { + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err) + + testMessage := []byte("message") + + hash := sha256.Sum256(testMessage) + + signature, err := rsa.SignPKCS1v15(nil, privateKey, crypto.SHA256, hash[:]) + require.NoError(t, err) + + publicKey := &privateKey.PublicKey + + pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + fmt.Println("Error marshalling public key:", err) + return + } + + pubPEM := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PUBLIC KEY", + Bytes: pubKeyBytes, + }) + + const operatorID = spectypes.OperatorID(0x12345678) + encodedSignedSSVMessage := commons.EncodeSignedSSVMessage(testMessage, operatorID, signature) + + decodedMessage, decodedOperatorID, decodedSignature, err := commons.DecodeSignedSSVMessage(encodedSignedSSVMessage) + require.NoError(t, err) + require.Equal(t, operatorID, decodedOperatorID) + require.Equal(t, signature, decodedSignature) + + messageHash := sha256.Sum256(decodedMessage) + + block, rest := pem.Decode(pubPEM) + require.NotNil(t, block) + 
require.Empty(t, rest, "extra data after PEM decoding") + + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + require.NoError(t, err) + + rsaPubKey, ok := pub.(*rsa.PublicKey) + require.True(t, ok) + + require.NoError(t, rsa.VerifyPKCS1v15(rsaPubKey, crypto.SHA256, messageHash[:], decodedSignature)) + require.Equal(t, testMessage, decodedMessage) +} + func TestGetMaxPeers(t *testing.T) { n := &p2pNetwork{ cfg: &Config{MaxPeers: 40, TopicMaxPeers: 8}, @@ -38,22 +96,29 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170"} - - ln, routers, err := createNetworkAndSubscribe(t, ctx, n, pks...) + ln, routers, err := createNetworkAndSubscribe(t, ctx, LocalNetOptions{ + Nodes: n, + MinConnected: n/2 - 1, + UseDiscv5: false, + }, pks...) require.NoError(t, err) require.NotNil(t, routers) require.NotNil(t, ln) + defer func() { + for _, node := range ln.Nodes { + require.NoError(t, node.(*p2pNetwork).Close()) + } + }() + node1, node2 := ln.Nodes[1], ln.Nodes[2] var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - msg1, err := dummyMsg(pks[0], 1) - require.NoError(t, err) - msg3, err := dummyMsg(pks[0], 3) - require.NoError(t, err) + msg1 := dummyMsgAttester(t, pks[0], 1) + msg3 := dummyMsgAttester(t, pks[0], 3) require.NoError(t, node1.Broadcast(msg1)) <-time.After(time.Millisecond * 10) require.NoError(t, node2.Broadcast(msg3)) @@ -64,11 +129,9 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - msg1, err := dummyMsg(pks[0], 1) - require.NoError(t, err) - msg2, err := dummyMsg(pks[1], 2) - require.NoError(t, err) - msg3, err := dummyMsg(pks[0], 3) + msg1 := dummyMsgAttester(t, pks[0], 1) + msg2 := dummyMsgAttester(t, pks[1], 2) + msg3 := dummyMsgAttester(t, pks[0], 3) require.NoError(t, err) 
<-time.After(time.Millisecond * 10) require.NoError(t, node1.Broadcast(msg2)) @@ -98,10 +161,6 @@ func TestP2pNetwork_SubscribeBroadcast(t *testing.T) { } <-time.After(time.Millisecond * 10) - - for _, node := range ln.Nodes { - require.NoError(t, node.(*p2pNetwork).Close()) - } } func TestP2pNetwork_Stream(t *testing.T) { @@ -112,13 +171,23 @@ func TestP2pNetwork_Stream(t *testing.T) { pkHex := "b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400" - ln, _, err := createNetworkAndSubscribe(t, ctx, n, pkHex) + ln, _, err := createNetworkAndSubscribe(t, ctx, LocalNetOptions{ + Nodes: n, + MinConnected: n/2 - 1, + UseDiscv5: false, + }, pkHex) + + defer func() { + for _, node := range ln.Nodes { + require.NoError(t, node.(*p2pNetwork).Close()) + } + }() require.NoError(t, err) require.Len(t, ln.Nodes, n) pk, err := hex.DecodeString(pkHex) require.NoError(t, err) - mid := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) + mid := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) rounds := []specqbft.Round{ 1, 1, 1, 1, 2, 2, @@ -140,7 +209,7 @@ func TestP2pNetwork_Stream(t *testing.T) { <-time.After(time.Second) node := ln.Nodes[0] - res, err := node.LastDecided(logger, mid) + res, err := node.(*p2pNetwork).LastDecided(logger, mid) require.NoError(t, err) select { case err := <-errors: @@ -150,6 +219,7 @@ func TestP2pNetwork_Stream(t *testing.T) { require.GreaterOrEqual(t, len(res), 2) // got at least 2 results require.LessOrEqual(t, len(res), 6) // less than 6 unique heights require.GreaterOrEqual(t, msgCounter, int64(2)) + } func TestWaitSubsetOfPeers(t *testing.T) { @@ -205,9 +275,30 @@ func TestWaitSubsetOfPeers(t *testing.T) { } } +func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]p2pprotocol.SyncResult, error) { + const ( + minPeers = 3 + waitTime = time.Second * 24 + ) + if !n.isReady() { + return nil, 
p2pprotocol.ErrNetworkIsNotReady + } + pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) + peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) + if err != nil { + return nil, errors.Wrap(err, "could not get subset of peers") + } + return n.makeSyncRequest(logger, peers, mid, pid, &message.SyncMessage{ + Params: &message.SyncParams{ + Identifier: mid, + }, + Protocol: message.LastDecidedType, + }) +} + func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes.MessageID, height specqbft.Height, round specqbft.Round, counter *int64, errors chan<- error) { - node.RegisterHandlers(logger, &protcolp2p.SyncHandler{ - Protocol: protcolp2p.LastDecidedProtocol, + node.RegisterHandlers(logger, &p2pprotocol.SyncHandler{ + Protocol: p2pprotocol.LastDecidedProtocol, Handler: func(message *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { atomic.AddInt64(counter, 1) sm := specqbft.SignedMessage{ @@ -235,21 +326,24 @@ func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes. 
}) } -func createNetworkAndSubscribe(t *testing.T, ctx context.Context, n int, pks ...string) (*LocalNet, []*dummyRouter, error) { - logger := logging.TestLogger(t) - ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), n, n/2-1, false) +func createNetworkAndSubscribe(t *testing.T, ctx context.Context, options LocalNetOptions, pks ...string) (*LocalNet, []*dummyRouter, error) { + logger, err := zap.NewDevelopment() + require.NoError(t, err) + ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), options) if err != nil { return nil, nil, err } - if len(ln.Nodes) != n { - return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), n) + if len(ln.Nodes) != options.Nodes { + return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), options.Nodes) } logger.Debug("created local network") - routers := make([]*dummyRouter, n) + routers := make([]*dummyRouter, options.Nodes) for i, node := range ln.Nodes { - routers[i] = &dummyRouter{i: i} + routers[i] = &dummyRouter{ + i: i, + } node.UseMessageRouter(routers[i]) } @@ -299,17 +393,14 @@ type dummyRouter struct { i int } -func (r *dummyRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { - c := atomic.AddUint64(&r.count, 1) - logger.Debug("got message", zap.Uint64("count", c)) +func (r *dummyRouter) Route(_ context.Context, _ *queue.DecodedSSVMessage) { + atomic.AddUint64(&r.count, 1) } -func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { +func dummyMsg(t *testing.T, pkHex string, height int, role spectypes.BeaconRole) *spectypes.SSVMessage { pk, err := hex.DecodeString(pkHex) - if err != nil { - return nil, err - } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) + require.NoError(t, err) + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, role) signedMsg := &specqbft.SignedMessage{ Message: specqbft.Message{ MsgType: 
specqbft.CommitMsgType, @@ -322,12 +413,14 @@ func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { Signers: []spectypes.OperatorID{1, 3, 4}, } data, err := signedMsg.Encode() - if err != nil { - return nil, err - } + require.NoError(t, err) return &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, Data: data, - }, nil + } +} + +func dummyMsgAttester(t *testing.T, pkHex string, height int) *spectypes.SSVMessage { + return dummyMsg(t, pkHex, height, spectypes.BNRoleAttester) } diff --git a/network/p2p/p2p_validation_test.go b/network/p2p/p2p_validation_test.go new file mode 100644 index 0000000000..2d8292a9ed --- /dev/null +++ b/network/p2p/p2p_validation_test.go @@ -0,0 +1,372 @@ +package p2pv1 + +import ( + "context" + cryptorand "crypto/rand" + "encoding/hex" + "fmt" + "github.com/cornelk/hashmap" + "os" + "sort" + "sync" + "sync/atomic" + "testing" + "time" + + "math/rand" + + "github.com/aquasecurity/table" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcegraph/conc/pool" + "github.com/stretchr/testify/require" +) + +// TestP2pNetwork_MessageValidation tests p2pNetwork would score peers according +// to the validity of the messages they broadcast. +// +// This test creates 4 nodes, each fulfilling a different role by broadcasting +// messages that would be accepted, ignored or rejected by the other nodes, +// and finally asserts that each node scores it's peers according to their +// played role (accepted > ignored > rejected). +func TestP2pNetwork_MessageValidation(t *testing.T) { + const ( + nodeCount = 4 + validatorCount = 20 + ) + var vNet *VirtualNet + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create 20 fake validator public keys. 
+ validators := make([]string, validatorCount) + for i := 0; i < validatorCount; i++ { + var validator [48]byte + cryptorand.Read(validator[:]) + validators[i] = hex.EncodeToString(validator[:]) + } + + // Create a MessageValidator to accept/reject/ignore messages according to their role type. + const ( + acceptedRole = spectypes.BNRoleProposer + ignoredRole = spectypes.BNRoleAttester + rejectedRole = spectypes.BNRoleSyncCommittee + ) + messageValidators := make([]*MockMessageValidator, nodeCount) + var mtx sync.Mutex + for i := 0; i < nodeCount; i++ { + i := i + messageValidators[i] = &MockMessageValidator{ + Accepted: make([]int, nodeCount), + Ignored: make([]int, nodeCount), + Rejected: make([]int, nodeCount), + } + messageValidators[i].ValidateFunc = func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + peer := vNet.NodeByPeerID(p) + + msg, err := commons.DecodeNetworkMsg(pmsg.Data) + require.NoError(t, err) + decodedMsg, err := queue.DecodeSSVMessage(msg) + require.NoError(t, err) + pmsg.ValidatorData = decodedMsg + mtx.Lock() + // Validation according to role. + var validation pubsub.ValidationResult + switch msg.MsgID.GetRoleType() { + case acceptedRole: + messageValidators[i].Accepted[peer.Index]++ + messageValidators[i].TotalAccepted++ + validation = pubsub.ValidationAccept + case ignoredRole: + messageValidators[i].Ignored[peer.Index]++ + messageValidators[i].TotalIgnored++ + validation = pubsub.ValidationIgnore + case rejectedRole: + messageValidators[i].Rejected[peer.Index]++ + messageValidators[i].TotalRejected++ + validation = pubsub.ValidationReject + default: + panic("unsupported role") + } + mtx.Unlock() + + // Always accept messages from self to make libp2p propagate them, + // while still counting them by their role. + if p == vNet.Nodes[i].Network.Host().ID() { + return pubsub.ValidationAccept + } + + return validation + } + } + + // Create a VirtualNet with 4 nodes. 
+ vNet = CreateVirtualNet(t, ctx, 4, validators, func(nodeIndex int) validation.MessageValidator { + return messageValidators[nodeIndex] + }) + defer func() { + require.NoError(t, vNet.Close()) + }() + + // Prepare a pool of broadcasters. + mu := sync.Mutex{} + height := atomic.Int64{} + roleBroadcasts := map[spectypes.BeaconRole]int{} + broadcasters := pool.New().WithErrors().WithContext(ctx) + broadcaster := func(node *VirtualNode, roles ...spectypes.BeaconRole) { + broadcasters.Go(func(ctx context.Context) error { + for i := 0; i < 50; i++ { + role := roles[i%len(roles)] + + mu.Lock() + roleBroadcasts[role]++ + mu.Unlock() + + msg := dummyMsg(t, validators[rand.Intn(len(validators))], int(height.Add(1)), role) + err := node.Broadcast(msg) + if err != nil { + return err + } + time.Sleep(10 * time.Millisecond) + } + return nil + }) + } + + // Broadcast the messages: + // - node 0 broadcasts accepted messages. + // - node 1 broadcasts ignored messages. + // - node 2 broadcasts rejected messages. + // - node 3 broadcasts all messages (equal distribution). + broadcaster(vNet.Nodes[0], acceptedRole) + broadcaster(vNet.Nodes[1], ignoredRole) + broadcaster(vNet.Nodes[2], rejectedRole) + broadcaster(vNet.Nodes[3], acceptedRole, ignoredRole, rejectedRole) + + // Wait for the broadcasters to finish. + err := broadcasters.Wait() + require.NoError(t, err) + time.Sleep(500 * time.Millisecond) + + // Assert that the messages were distributed as expected. 
+ deadline := time.Now().Add(5 * time.Second) + interval := 100 * time.Millisecond + for i := 0; i < nodeCount; i++ { + // better lock inside loop than wait interval locked + mtx.Lock() + var errors []error + if roleBroadcasts[acceptedRole] != messageValidators[i].TotalAccepted { + errors = append(errors, fmt.Errorf("node %d accepted %d messages (expected %d)", i, messageValidators[i].TotalAccepted, roleBroadcasts[acceptedRole])) + } + if roleBroadcasts[ignoredRole] != messageValidators[i].TotalIgnored { + errors = append(errors, fmt.Errorf("node %d ignored %d messages (expected %d)", i, messageValidators[i].TotalIgnored, roleBroadcasts[ignoredRole])) + } + if roleBroadcasts[rejectedRole] != messageValidators[i].TotalRejected { + errors = append(errors, fmt.Errorf("node %d rejected %d messages (expected %d)", i, messageValidators[i].TotalRejected, roleBroadcasts[rejectedRole])) + } + mtx.Unlock() + if len(errors) == 0 { + break + } + if time.Now().After(deadline) { + require.Empty(t, errors) + } + time.Sleep(interval) + } + + // Assert that each node scores it's peers according to the following order: + // - node 0, (node 1 OR 3), (node 1 OR 3), node 2 + // (after excluding itself from this list) + for _, node := range vNet.Nodes { + node := node + + // Prepare the valid orders, excluding the node itself. + validOrders := [][]NodeIndex{ + {0, 1, 3, 2}, + {0, 3, 1, 2}, + } + for i, validOrder := range validOrders { + for j, index := range validOrder { + if index == node.Index { + validOrders[i] = append(validOrders[i][:j], validOrders[i][j+1:]...) + break + } + } + } + + // Sort peers by their scores. 
+ type peerScore struct { + index NodeIndex + score float64 + } + peers := make([]peerScore, 0, node.PeerScores.Len()) + node.PeerScores.Range(func(index NodeIndex, snapshot *pubsub.PeerScoreSnapshot) bool { + peers = append(peers, peerScore{index, snapshot.Score}) + return true + }) + sort.Slice(peers, func(i, j int) bool { + return peers[i].score > peers[j].score + }) + + // Print a pretty table of each node's peers and their scores. + defer func() { + tbl := table.New(os.Stdout) + tbl.SetHeaders("Peer", "Score", "Accepted", "Ignored", "Rejected") + mtx.Lock() + for _, peer := range peers { + tbl.AddRow( + fmt.Sprintf("%d", peer.index), + fmt.Sprintf("%.2f", peer.score), + fmt.Sprintf("%d", messageValidators[node.Index].Accepted[peer.index]), + fmt.Sprintf("%d", messageValidators[node.Index].Ignored[peer.index]), + fmt.Sprintf("%d", messageValidators[node.Index].Rejected[peer.index]), + ) + } + mtx.Unlock() + fmt.Println() + fmt.Printf("Peer Scores (Node %d)\n", node.Index) + tbl.Render() + }() + + // Assert that the peers are in one of the valid orders. 
+ require.Equal(t, len(vNet.Nodes)-1, len(peers), "node %d", node.Index) + for i, validOrder := range validOrders { + valid := true + for j, peer := range peers { + if peer.index != validOrder[j] { + valid = false + break + } + } + if valid { + break + } + if i == len(validOrders)-1 { + require.Fail(t, "invalid order", "node %d", node.Index) + } + } + } + defer fmt.Println() +} + +type MockMessageValidator struct { + Accepted []int + Ignored []int + Rejected []int + TotalAccepted int + TotalIgnored int + TotalRejected int + + ValidateFunc func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult +} + +func (v *MockMessageValidator) ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + return v.ValidatePubsubMessage +} + +func (v *MockMessageValidator) ValidatePubsubMessage(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + return v.ValidateFunc(ctx, p, pmsg) +} + +func (v *MockMessageValidator) ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, validation.Descriptor, error) { + panic("not implemented") // TODO: Implement +} + +type NodeIndex int + +type VirtualNode struct { + Index NodeIndex + Network *p2pNetwork + PeerScores *hashmap.Map[NodeIndex, *pubsub.PeerScoreSnapshot] +} + +func (n *VirtualNode) Broadcast(msg *spectypes.SSVMessage) error { + return n.Network.Broadcast(msg) +} + +// VirtualNet is a utility to create & interact with a virtual network of nodes. 
+type VirtualNet struct { + Nodes []*VirtualNode +} + +func CreateVirtualNet( + t *testing.T, + ctx context.Context, + nodes int, + validatorPubKeys []string, + messageValidatorProvider func(int) validation.MessageValidator, +) *VirtualNet { + var doneSetup atomic.Bool + vn := &VirtualNet{} + ln, routers, err := createNetworkAndSubscribe(t, ctx, LocalNetOptions{ + Nodes: nodes, + MinConnected: nodes - 1, + UseDiscv5: false, + TotalValidators: 1000, + ActiveValidators: 800, + MyValidators: 300, + MessageValidatorProvider: messageValidatorProvider, + PeerScoreInspector: func(selfPeer peer.ID, peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) { + if !doneSetup.Load() { + return + } + node := vn.NodeByPeerID(selfPeer) + if node == nil { + t.Fatalf("self peer not found (%s)", selfPeer) + } + + node.PeerScores.Range(func(index NodeIndex, snapshot *pubsub.PeerScoreSnapshot) bool { + node.PeerScores.Del(index) + return true + }) + for peerID, peerScore := range peerMap { + peerNode := vn.NodeByPeerID(peerID) + if peerNode == nil { + t.Fatalf("peer not found (%s)", peerID) + } + node.PeerScores.Set(peerNode.Index, peerScore) + } + + }, + PeerScoreInspectorInterval: time.Millisecond * 5, + }, validatorPubKeys...) 
+ + require.NoError(t, err) + require.NotNil(t, routers) + require.NotNil(t, ln) + + for i, node := range ln.Nodes { + vn.Nodes = append(vn.Nodes, &VirtualNode{ + Index: NodeIndex(i), + Network: node.(*p2pNetwork), + PeerScores: hashmap.New[NodeIndex, *pubsub.PeerScoreSnapshot](), //{}make(map[NodeIndex]*pubsub.PeerScoreSnapshot), + }) + } + doneSetup.Store(true) + + return vn +} + +func (vn *VirtualNet) NodeByPeerID(peerID peer.ID) *VirtualNode { + for _, node := range vn.Nodes { + if node.Network.Host().ID() == peerID { + return node + } + } + return nil +} + +func (vn *VirtualNet) Close() error { + for _, node := range vn.Nodes { + err := node.Network.Close() + if err != nil { + return err + } + } + return nil +} diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go index bcfa9ad311..7244f22a18 100644 --- a/network/p2p/test_utils.go +++ b/network/p2p/test_utils.go @@ -2,22 +2,25 @@ package p2pv1 import ( "context" - "crypto/ecdsa" "encoding/hex" "fmt" "time" + pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/zap" "golang.org/x/sync/errgroup" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" + p2pcommons "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" "github.com/bloxapp/ssv/network/peers" "github.com/bloxapp/ssv/network/peers/connections/mock" "github.com/bloxapp/ssv/network/testing" + "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/utils/format" "github.com/bloxapp/ssv/utils/rsaencryption" ) @@ -47,7 +50,7 @@ func (ln *LocalNet) WithBootnode(ctx context.Context, logger *zap.Logger) error if err != nil { return err } - isk, err := commons.ConvertToInterfacePrivkey(bnSk) + isk, err := commons.ECDSAPrivToInterface(bnSk) if err != nil { return err } @@ -70,9 +73,9 @@ func (ln *LocalNet) WithBootnode(ctx context.Context, logger *zap.Logger) error 
// CreateAndStartLocalNet creates a new local network and starts it // if any errors occurs during starting local network CreateAndStartLocalNet trying // to create and start local net one more time until pCtx is not Done() -func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, nodesQuantity, minConnected int, useDiscv5 bool) (*LocalNet, error) { - attempt := func(pCtx context.Context, nodesQuantity, minConnected int, useDiscv5 bool) (*LocalNet, error) { - ln, err := NewLocalNet(pCtx, logger, nodesQuantity, useDiscv5) +func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, options LocalNetOptions) (*LocalNet, error) { + attempt := func(pCtx context.Context) (*LocalNet, error) { + ln, err := NewLocalNet(pCtx, logger, options) if err != nil { return nil, err } @@ -88,14 +91,14 @@ func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, nodesQuant ctx, cancel := context.WithTimeout(ctx, 15*time.Second) defer cancel() var peers []peer.ID - for len(peers) < minConnected && ctx.Err() == nil { + for len(peers) < options.MinConnected && ctx.Err() == nil { peers = node.(HostProvider).Host().Network().Peers() time.Sleep(time.Millisecond * 100) } if ctx.Err() != nil { - return fmt.Errorf("could not find enough peers for node %d, nodes quantity = %d, found = %d", i, nodesQuantity, len(peers)) + return fmt.Errorf("could not find enough peers for node %d, nodes quantity = %d, found = %d", i, options.Nodes, len(peers)) } - logger.Debug("found enough peers", zap.Int("for node", i), zap.Int("nodesQuantity", nodesQuantity), zap.String("found", fmt.Sprintf("%+v", peers))) + logger.Debug("found enough peers", zap.Int("for node", i), zap.Int("nodesQuantity", options.Nodes), zap.String("found", fmt.Sprintf("%+v", peers))) return nil }) } @@ -108,7 +111,7 @@ func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, nodesQuant case <-pCtx.Done(): return nil, fmt.Errorf("context is done, network didn't start on time") default: - ln, 
err := attempt(pCtx, nodesQuantity, minConnected, useDiscv5) + ln, err := attempt(pCtx) if err != nil { for _, node := range ln.Nodes { _ = node.Close() @@ -124,18 +127,48 @@ func CreateAndStartLocalNet(pCtx context.Context, logger *zap.Logger, nodesQuant } // NewTestP2pNetwork creates a new network.P2PNetwork instance -func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys, logger *zap.Logger, maxPeers int) (network.P2PNetwork, error) { +func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, nodeIndex int, keys testing.NodeKeys, logger *zap.Logger, options LocalNetOptions) (network.P2PNetwork, error) { operatorPubkey, err := rsaencryption.ExtractPublicKey(keys.OperatorKey) if err != nil { return nil, err } - cfg := NewNetConfig(keys.NetKey, format.OperatorID([]byte(operatorPubkey)), ln.Bootnode, testing.RandomTCPPort(12001, 12999), ln.udpRand.Next(13001, 13999), maxPeers) + cfg := NewNetConfig(keys, format.OperatorID([]byte(operatorPubkey)), ln.Bootnode, testing.RandomTCPPort(12001, 12999), ln.udpRand.Next(13001, 13999), options.Nodes) cfg.Ctx = ctx cfg.Subnets = "00000000000000000000020000000000" //PAY ATTENTION for future test scenarios which use more than one eth-validator we need to make this field dynamically changing cfg.NodeStorage = mock.NodeStorage{ MockGetPrivateKey: keys.OperatorKey, RegisteredOperatorPublicKeyPEMs: []string{}, } + cfg.Metrics = nil + cfg.MessageValidator = validation.NewMessageValidator(networkconfig.TestNetwork) + cfg.Network = networkconfig.TestNetwork + if options.TotalValidators > 0 { + cfg.GetValidatorStats = func() (uint64, uint64, uint64, error) { + return uint64(options.TotalValidators), uint64(options.ActiveValidators), uint64(options.MyValidators), nil + } + } + + pubKey, err := p2pcommons.ECDSAPrivToInterface(keys.NetKey) + if err != nil { + panic(err) + } + selfPeerID, err := peer.IDFromPublicKey(pubKey.GetPublic()) + if err != nil { + panic(err) + } + + if options.MessageValidatorProvider != 
nil { + cfg.MessageValidator = options.MessageValidatorProvider(nodeIndex) + } else { + cfg.MessageValidator = validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithSelfAccept(selfPeerID, true)) + } + + if options.PeerScoreInspector != nil && options.PeerScoreInspectorInterval > 0 { + cfg.PeerScoreInspector = func(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) { + options.PeerScoreInspector(selfPeerID, peerMap) + } + cfg.PeerScoreInspectorInterval = options.PeerScoreInspectorInterval + } p := New(logger, cfg) err = p.Setup(logger) @@ -145,20 +178,28 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys return p, nil } +type LocalNetOptions struct { + MessageValidatorProvider func(int) validation.MessageValidator + Nodes int + MinConnected int + UseDiscv5 bool + TotalValidators, ActiveValidators, MyValidators int + PeerScoreInspector func(selfPeer peer.ID, peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) + PeerScoreInspectorInterval time.Duration +} + // NewLocalNet creates a new mdns network -func NewLocalNet(ctx context.Context, logger *zap.Logger, n int, useDiscv5 bool) (*LocalNet, error) { +func NewLocalNet(ctx context.Context, logger *zap.Logger, options LocalNetOptions) (*LocalNet, error) { ln := &LocalNet{} ln.udpRand = make(testing.UDPPortsRandomizer) - if useDiscv5 { + if options.UseDiscv5 { if err := ln.WithBootnode(ctx, logger); err != nil { return nil, err } } - i := 0 - nodes, keys, err := testing.NewLocalTestnet(ctx, n, func(pctx context.Context, keys testing.NodeKeys) network.P2PNetwork { - i++ - logger := logger.Named(fmt.Sprintf("node-%d", i)) - p, err := ln.NewTestP2pNetwork(pctx, keys, logger, n) + nodes, keys, err := testing.NewLocalTestnet(ctx, options.Nodes, func(pctx context.Context, nodeIndex int, keys testing.NodeKeys) network.P2PNetwork { + logger := logger.Named(fmt.Sprintf("node-%d", nodeIndex)) + p, err := ln.NewTestP2pNetwork(pctx, nodeIndex, keys, logger, options) if err != nil { 
logger.Error("could not setup network", zap.Error(err)) } @@ -174,7 +215,7 @@ func NewLocalNet(ctx context.Context, logger *zap.Logger, n int, useDiscv5 bool) } // NewNetConfig creates a new config for tests -func NewNetConfig(netPrivKey *ecdsa.PrivateKey, operatorID string, bn *discovery.Bootnode, tcpPort, udpPort, maxPeers int) *Config { +func NewNetConfig(keys testing.NodeKeys, operatorPubKeyHash string, bn *discovery.Bootnode, tcpPort, udpPort, maxPeers int) *Config { bns := "" discT := "discv5" if bn != nil { @@ -184,19 +225,21 @@ func NewNetConfig(netPrivKey *ecdsa.PrivateKey, operatorID string, bn *discovery } ua := "" return &Config{ - Bootnodes: bns, - TCPPort: tcpPort, - UDPPort: udpPort, - HostAddress: "", - HostDNS: "", - RequestTimeout: 10 * time.Second, - MaxBatchResponse: 25, - MaxPeers: maxPeers, - PubSubTrace: false, - NetworkPrivateKey: netPrivKey, - OperatorID: operatorID, - UserAgent: ua, - Discovery: discT, + Bootnodes: bns, + TCPPort: tcpPort, + UDPPort: udpPort, + HostAddress: "", + HostDNS: "", + RequestTimeout: 10 * time.Second, + MaxBatchResponse: 25, + MaxPeers: maxPeers, + PubSubTrace: false, + PubSubScoring: true, + NetworkPrivateKey: keys.NetKey, + OperatorPrivateKey: keys.OperatorKey, + OperatorPubKeyHash: operatorPubKeyHash, + UserAgent: ua, + Discovery: discT, Permissioned: func() bool { return false }, diff --git a/network/peers/conn_manager_test.go b/network/peers/conn_manager_test.go index 6d6c3d4f5e..a65ebf3ae2 100644 --- a/network/peers/conn_manager_test.go +++ b/network/peers/conn_manager_test.go @@ -19,7 +19,7 @@ func TestTagBestPeers(t *testing.T) { connMgrMock := newConnMgr() allSubs, _ := records.Subnets{}.FromString(records.AllSubnets) - si := newSubnetsIndex(len(allSubs)) + si := NewSubnetsIndex(len(allSubs)) cm := NewConnManager(zap.NewNop(), connMgrMock, si).(*connManager) diff --git a/network/peers/connections/mock/mock_connection_index.go b/network/peers/connections/mock/mock_connection_index.go new file mode 100644 
index 0000000000..7960d80450 --- /dev/null +++ b/network/peers/connections/mock/mock_connection_index.go @@ -0,0 +1,32 @@ +package mock + +import ( + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "go.uber.org/zap" +) + +// MockConnectionIndex is a mock implementation of the ConnectionIndex interface +type MockConnectionIndex struct { + LimitValue bool +} + +// Connectedness panics if called +func (m *MockConnectionIndex) Connectedness(id peer.ID) network.Connectedness { + panic("Connectedness method is not implemented in MockConnectionIndex") +} + +// CanConnect panics if called +func (m *MockConnectionIndex) CanConnect(id peer.ID) bool { + panic("CanConnect method is not implemented in MockConnectionIndex") +} + +// Limit returns the mock value for Limit +func (m *MockConnectionIndex) Limit(dir network.Direction) bool { + return m.LimitValue +} + +// IsBad panics if called +func (m *MockConnectionIndex) IsBad(logger *zap.Logger, id peer.ID) bool { + panic("IsBad method is not implemented in MockConnectionIndex") +} diff --git a/network/peers/connections/mock/mock_storage.go b/network/peers/connections/mock/mock_storage.go index dc85870cad..a6944eb4b9 100644 --- a/network/peers/connections/mock/mock_storage.go +++ b/network/peers/connections/mock/mock_storage.go @@ -89,7 +89,7 @@ func (m NodeStorage) DeleteOperatorData(txn basedb.ReadWriter, id spectypes.Oper func (m NodeStorage) ListOperators(txn basedb.Reader, from uint64, to uint64) ([]registrystorage.OperatorData, error) { //TODO implement me - panic("implement me") + return nil, errors.New("empty") } func (m NodeStorage) GetOperatorsPrefix() []byte { diff --git a/network/peers/index.go b/network/peers/index.go index 8e8cab40b8..35686ee2c7 100644 --- a/network/peers/index.go +++ b/network/peers/index.go @@ -4,13 +4,14 @@ import ( "crypto/rsa" "io" - "github.com/bloxapp/ssv/network/records" "github.com/libp2p/go-libp2p/core/network" libp2pnetwork 
"github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/network/records" ) const ( @@ -71,7 +72,7 @@ type NodeInfoIndex interface { NodeInfo(id peer.ID) *records.NodeInfo } -// InfoIndex is an interface for managing PeerInfo of network peers +// PeerInfoIndex is an interface for managing PeerInfo of network peers type PeerInfoIndex interface { // PeerInfo returns the PeerInfo of the given peer, or nil if not found. PeerInfo(peer.ID) *PeerInfo diff --git a/network/peers/peers_index.go b/network/peers/peers_index.go index 06d846098b..5ba5fc535a 100644 --- a/network/peers/peers_index.go +++ b/network/peers/peers_index.go @@ -45,7 +45,7 @@ func NewPeersIndex(logger *zap.Logger, network libp2pnetwork.Network, self *reco return &peersIndex{ network: network, scoreIdx: newScoreIndex(), - SubnetsIndex: newSubnetsIndex(subnetsCount), + SubnetsIndex: NewSubnetsIndex(subnetsCount), PeerInfoIndex: NewPeerInfoIndex(), self: self, selfLock: &sync.RWMutex{}, diff --git a/network/peers/scores_test.go b/network/peers/scores_test.go index d0178d45a6..763be7c974 100644 --- a/network/peers/scores_test.go +++ b/network/peers/scores_test.go @@ -15,7 +15,7 @@ func TestScoresIndex(t *testing.T) { nks, err := nettesting.CreateKeys(1) require.NoError(t, err) - sk, err := commons.ConvertToInterfacePrivkey(nks[0].NetKey) + sk, err := commons.ECDSAPrivToInterface(nks[0].NetKey) require.NoError(t, err) pid, err := peer.IDFromPrivateKey(sk) require.NoError(t, err) diff --git a/network/peers/subnets.go b/network/peers/subnets.go index f9f68cfb44..7b1b5369f5 100644 --- a/network/peers/subnets.go +++ b/network/peers/subnets.go @@ -16,7 +16,7 @@ type subnetsIndex struct { lock *sync.RWMutex } -func newSubnetsIndex(count int) SubnetsIndex { +func NewSubnetsIndex(count int) SubnetsIndex { return &subnetsIndex{ subnets: make([][]peer.ID, count), peerSubnets: 
map[peer.ID]records.Subnets{}, diff --git a/network/peers/subnets_test.go b/network/peers/subnets_test.go index 3460d92a93..5679ad71d9 100644 --- a/network/peers/subnets_test.go +++ b/network/peers/subnets_test.go @@ -19,7 +19,7 @@ func TestSubnetsIndex(t *testing.T) { var pids []peer.ID for _, nk := range nks { - sk, err := commons.ConvertToInterfacePrivkey(nk.NetKey) + sk, err := commons.ECDSAPrivToInterface(nk.NetKey) require.NoError(t, err) pid, err := peer.IDFromPrivateKey(sk) require.NoError(t, err) @@ -33,7 +33,7 @@ func TestSubnetsIndex(t *testing.T) { sPartial, err := records.Subnets{}.FromString("0x57b080fffd743d9878dc41a184ab160a") require.NoError(t, err) - subnetsIdx := newSubnetsIndex(128) + subnetsIdx := NewSubnetsIndex(128) subnetsIdx.UpdatePeerSubnets(pids[0], sAll.Clone()) subnetsIdx.UpdatePeerSubnets(pids[1], sNone.Clone()) diff --git a/network/records/entries.go b/network/records/entries.go new file mode 100644 index 0000000000..ef88047d90 --- /dev/null +++ b/network/records/entries.go @@ -0,0 +1,84 @@ +package records + +import ( + "io" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/ethereum/go-ethereum/rlp" + "github.com/pkg/errors" + "github.com/prysmaticlabs/go-bitfield" + + spectypes "github.com/bloxapp/ssv-spec/types" +) + +var ErrEntryNotFound = errors.New("not found") + +// DomainTypeEntry holds the domain type of the node +type DomainTypeEntry spectypes.DomainType + +// ENRKey implements enr.Entry, returns the entry key +func (dt DomainTypeEntry) ENRKey() string { return "domaintype" } + +// EncodeRLP implements rlp.Encoder, encodes domain type as bytes +func (dt DomainTypeEntry) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, dt[:]) +} + +// DecodeRLP implements rlp.Decoder, decodes domain type from bytes +func (dt *DomainTypeEntry) DecodeRLP(s *rlp.Stream) error { + var buf []byte + if err := s.Decode(&buf); err != nil { + return err + } + *dt = DomainTypeEntry(buf) + 
return nil +} + +// SetDomainTypeEntry adds domain type entry to the node +func SetDomainTypeEntry(node *enode.LocalNode, domainType spectypes.DomainType) error { + node.Set(DomainTypeEntry(domainType)) + return nil +} + +// GetDomainTypeEntry extracts the value of domain type entry +func GetDomainTypeEntry(record *enr.Record) (spectypes.DomainType, error) { + dt := new(DomainTypeEntry) + if err := record.Load(dt); err != nil { + if enr.IsNotFound(err) { + return spectypes.DomainType{}, ErrEntryNotFound + } + return spectypes.DomainType{}, err + } + return spectypes.DomainType(*dt), nil +} + +// SetSubnetsEntry adds subnets entry to our enode.LocalNode +func SetSubnetsEntry(node *enode.LocalNode, subnets []byte) error { + subnetsVec := bitfield.NewBitvector128() + for i, subnet := range subnets { + subnetsVec.SetBitAt(uint64(i), subnet > 0) + } + node.Set(enr.WithEntry("subnets", &subnetsVec)) + return nil +} + +// GetSubnetsEntry extracts the value of subnets entry from some record +func GetSubnetsEntry(record *enr.Record) ([]byte, error) { + subnetsVec := bitfield.NewBitvector128() + if err := record.Load(enr.WithEntry("subnets", &subnetsVec)); err != nil { + if enr.IsNotFound(err) { + return nil, ErrEntryNotFound + } + return nil, err + } + res := make([]byte, 0, subnetsVec.Len()) + for i := uint64(0); i < subnetsVec.Len(); i++ { + val := byte(0) + if subnetsVec.BitAt(i) { + val = 1 + } + res = append(res, val) + } + return res, nil +} diff --git a/network/records/subnets.go b/network/records/subnets.go index 9bba0d46ae..58dabb5c7d 100644 --- a/network/records/subnets.go +++ b/network/records/subnets.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" "github.com/pkg/errors" "github.com/prysmaticlabs/go-bitfield" ) @@ -24,7 +23,7 @@ const ( // count is the amount of subnets, in case that the entry doesn't exist as we want to initialize it func UpdateSubnets(node *enode.LocalNode, count 
int, added []int, removed []int) ([]byte, error) { subnets, err := GetSubnetsEntry(node.Node().Record()) - if err != nil { + if err != nil && !errors.Is(err, ErrEntryNotFound) { return nil, errors.Wrap(err, "could not read subnets entry") } orig := make([]byte, len(subnets)) @@ -48,36 +47,6 @@ func UpdateSubnets(node *enode.LocalNode, count int, added []int, removed []int) return subnets, nil } -// SetSubnetsEntry adds subnets entry to our enode.LocalNode -func SetSubnetsEntry(node *enode.LocalNode, subnets []byte) error { - subnetsVec := bitfield.NewBitvector128() - for i, subnet := range subnets { - subnetsVec.SetBitAt(uint64(i), subnet > 0) - } - node.Set(enr.WithEntry("subnets", &subnetsVec)) - return nil -} - -// GetSubnetsEntry extracts the value of subnets entry from some record -func GetSubnetsEntry(record *enr.Record) ([]byte, error) { - subnetsVec := bitfield.NewBitvector128() - if err := record.Load(enr.WithEntry("subnets", &subnetsVec)); err != nil { - if enr.IsNotFound(err) { - return nil, nil - } - return nil, err - } - res := make([]byte, 0, subnetsVec.Len()) - for i := uint64(0); i < subnetsVec.Len(); i++ { - val := byte(0) - if subnetsVec.BitAt(i) { - val = 1 - } - res = append(res, val) - } - return res, nil -} - // Subnets holds all the subscribed subnets of a specific node type Subnets []byte diff --git a/network/records/subnets_test.go b/network/records/subnets_test.go index b09dd12fd0..47e35f88ab 100644 --- a/network/records/subnets_test.go +++ b/network/records/subnets_test.go @@ -15,7 +15,7 @@ func Test_SubnetsEntry(t *testing.T) { SubnetsCount := 128 priv, _, err := crypto.GenerateSecp256k1Key(crand.Reader) require.NoError(t, err) - sk, err := commons.ConvertFromInterfacePrivKey(priv) + sk, err := commons.ECDSAPrivFromInterface(priv) require.NoError(t, err) ip, err := commons.IPAddr() require.NoError(t, err) diff --git a/network/syncing/concurrent.go b/network/syncing/concurrent.go deleted file mode 100644 index d3ddcd2ec1..0000000000 --- 
a/network/syncing/concurrent.go +++ /dev/null @@ -1,189 +0,0 @@ -package syncing - -import ( - "context" - "fmt" - "sync" - "time" - - "go.uber.org/zap" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" -) - -// Error describes an error that occurred during a syncing operation. -type Error struct { - Operation Operation - Err error -} - -func (e Error) Error() string { - return fmt.Sprintf("%s: %v", e.Operation, e.Err) -} - -// Timeouts is a set of timeouts for each syncing operation. -type Timeouts struct { - // SyncHighestDecided is the timeout for SyncHighestDecided. - // Leave zero to not timeout. - SyncHighestDecided time.Duration - - // SyncDecidedByRange is the timeout for SyncDecidedByRange. - // Leave zero to not timeout. - SyncDecidedByRange time.Duration -} - -var DefaultTimeouts = Timeouts{ - SyncHighestDecided: 12 * time.Second, - SyncDecidedByRange: 30 * time.Minute, -} - -// Operation is a syncing operation that has been queued for execution. 
-type Operation interface { - run(context.Context, *zap.Logger, Syncer) error - timeout(Timeouts) time.Duration -} - -type OperationSyncHighestDecided struct { - ID spectypes.MessageID - Handler MessageHandler -} - -func (o OperationSyncHighestDecided) run(ctx context.Context, logger *zap.Logger, s Syncer) error { - return s.SyncHighestDecided(ctx, logger, o.ID, o.Handler) -} - -func (o OperationSyncHighestDecided) timeout(t Timeouts) time.Duration { - return t.SyncHighestDecided -} - -func (o OperationSyncHighestDecided) String() string { - return fmt.Sprintf("SyncHighestDecided(%s)", o.ID) -} - -type OperationSyncDecidedByRange struct { - ID spectypes.MessageID - From specqbft.Height - To specqbft.Height - Handler MessageHandler -} - -func (o OperationSyncDecidedByRange) run(ctx context.Context, logger *zap.Logger, s Syncer) error { - return s.SyncDecidedByRange(ctx, logger, o.ID, o.From, o.To, o.Handler) -} - -func (o OperationSyncDecidedByRange) timeout(t Timeouts) time.Duration { - return t.SyncDecidedByRange -} - -func (o OperationSyncDecidedByRange) String() string { - return fmt.Sprintf("SyncDecidedByRange(%s, %d, %d)", o.ID, o.From, o.To) -} - -// ConcurrentSyncer is a Syncer that runs the given Syncer's methods concurrently. -type ConcurrentSyncer struct { - syncer Syncer - ctx context.Context - jobs chan Operation - errors chan<- Error - concurrency int - timeouts Timeouts -} - -// NewConcurrent returns a new Syncer that runs the given Syncer's methods concurrently. -// Unlike the standard syncer, syncing methods are non-blocking and return immediately without error. -// concurrency is the number of worker goroutines to spawn. -// errors is a channel to which any errors are sent. Pass nil to discard errors. 
-func NewConcurrent( - ctx context.Context, - syncer Syncer, - concurrency int, - timeouts Timeouts, - errors chan<- Error, -) *ConcurrentSyncer { - return &ConcurrentSyncer{ - syncer: syncer, - ctx: ctx, - // TODO: make the buffer size configurable or better-yet unbounded? - jobs: make(chan Operation, 128*1024), - errors: errors, - concurrency: concurrency, - timeouts: timeouts, - } -} - -// Run starts the worker goroutines and blocks until the context is done -// and any remaining jobs are finished. -func (s *ConcurrentSyncer) Run(logger *zap.Logger) { - // Spawn worker goroutines. - var wg sync.WaitGroup - for i := 0; i < s.concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for job := range s.jobs { - s.do(logger, job) - } - }() - } - - // Close the jobs channel when the context is done. - <-s.ctx.Done() - close(s.jobs) - - // Wait for workers to finish their current jobs. - wg.Wait() -} - -func (s *ConcurrentSyncer) do(logger *zap.Logger, job Operation) { - ctx, cancel := context.WithTimeout(s.ctx, job.timeout(s.timeouts)) - defer cancel() - err := job.run(ctx, logger, s.syncer) - if err != nil && s.errors != nil { - s.errors <- Error{ - Operation: job, - Err: err, - } - } -} - -// Queued returns the number of jobs that are queued but not yet started. -func (s *ConcurrentSyncer) Queued() int { - return len(s.jobs) -} - -// Capacity returns the maximum number of jobs that can be queued. -// When Queued() == Capacity(), then the next call will block -// until a job is finished. 
-func (s *ConcurrentSyncer) Capacity() int { - return cap(s.jobs) -} - -func (s *ConcurrentSyncer) SyncHighestDecided( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - handler MessageHandler, -) error { - s.jobs <- OperationSyncHighestDecided{ - ID: id, - Handler: handler, - } - return nil -} - -func (s *ConcurrentSyncer) SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, -) error { - s.jobs <- OperationSyncDecidedByRange{ - ID: id, - From: from, - To: to, - Handler: handler, - } - return nil -} diff --git a/network/syncing/concurrent_test.go b/network/syncing/concurrent_test.go deleted file mode 100644 index ace426f6a2..0000000000 --- a/network/syncing/concurrent_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package syncing_test - -import ( - "context" - "fmt" - "runtime" - "testing" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/network/syncing" - "github.com/bloxapp/ssv/network/syncing/mocks" -) - -func TestConcurrentSyncer(t *testing.T) { - logger := logging.TestLogger(t) - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Test setup - syncer := mocks.NewMockSyncer(ctrl) - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - syncer.EXPECT().SyncHighestDecided(gomock.Any(), gomock.Any(), id, gomock.Any()).Return(nil) - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - 
// Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - syncer.EXPECT().SyncDecidedByRange(gomock.Any(), gomock.Any(), id, from, to, gomock.Any()).Return(nil) - require.NoError(t, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - - // Test error handling - syncer.EXPECT().SyncHighestDecided(gomock.Any(), gomock.Any(), id, gomock.Any()).Return(fmt.Errorf("test error")) - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Wait for the syncer to finish - cancel() - - // Verify errors. - select { - case err := <-errors: - require.IsType(t, syncing.OperationSyncHighestDecided{}, err.Operation) - require.Equal(t, id, err.Operation.(syncing.OperationSyncHighestDecided).ID) - require.Equal(t, "test error", err.Err.Error()) - case <-done: - t.Fatal("error channel should have received an error") - } - <-done -} - -func TestConcurrentSyncerMemoryUsage(t *testing.T) { - logger := logging.TestLogger(t) - - for i := 0; i < 4; i++ { - var before runtime.MemStats - runtime.ReadMemStats(&before) - - // Test setup - syncer := &mockSyncer{} - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - for i := 0; i < 1024*128; i++ { - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - require.NoError(t, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - } - - // Wait for the syncer to finish - cancel() - <-done - - var after runtime.MemStats - runtime.ReadMemStats(&after) - t.Logf("Allocated: %.2f MB", 
float64(after.TotalAlloc-before.TotalAlloc)/1024/1024) - } -} - -func BenchmarkConcurrentSyncer(b *testing.B) { - logger := logging.BenchLogger(b) - - for i := 0; i < b.N; i++ { - // Test setup - syncer := &mockSyncer{} - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - for i := 0; i < 1024*128; i++ { - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - require.NoError(b, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - require.NoError(b, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - } - - // Wait for the syncer to finish - cancel() - <-done - } -} diff --git a/network/syncing/mocks/syncer.go b/network/syncing/mocks/syncer.go deleted file mode 100644 index 1aa3a3d55d..0000000000 --- a/network/syncing/mocks/syncer.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./syncer.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - qbft "github.com/bloxapp/ssv-spec/qbft" - types "github.com/bloxapp/ssv-spec/types" - syncing "github.com/bloxapp/ssv/network/syncing" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - gomock "github.com/golang/mock/gomock" - zap "go.uber.org/zap" -) - -// MockSyncer is a mock of Syncer interface. -type MockSyncer struct { - ctrl *gomock.Controller - recorder *MockSyncerMockRecorder -} - -// MockSyncerMockRecorder is the mock recorder for MockSyncer. -type MockSyncerMockRecorder struct { - mock *MockSyncer -} - -// NewMockSyncer creates a new mock instance. 
-func NewMockSyncer(ctrl *gomock.Controller) *MockSyncer { - mock := &MockSyncer{ctrl: ctrl} - mock.recorder = &MockSyncerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSyncer) EXPECT() *MockSyncerMockRecorder { - return m.recorder -} - -// SyncDecidedByRange mocks base method. -func (m *MockSyncer) SyncDecidedByRange(ctx context.Context, logger *zap.Logger, id types.MessageID, from, to qbft.Height, handler syncing.MessageHandler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncDecidedByRange", ctx, logger, id, from, to, handler) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncDecidedByRange indicates an expected call of SyncDecidedByRange. -func (mr *MockSyncerMockRecorder) SyncDecidedByRange(ctx, logger, id, from, to, handler interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncDecidedByRange", reflect.TypeOf((*MockSyncer)(nil).SyncDecidedByRange), ctx, logger, id, from, to, handler) -} - -// SyncHighestDecided mocks base method. -func (m *MockSyncer) SyncHighestDecided(ctx context.Context, logger *zap.Logger, id types.MessageID, handler syncing.MessageHandler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncHighestDecided", ctx, logger, id, handler) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncHighestDecided indicates an expected call of SyncHighestDecided. -func (mr *MockSyncerMockRecorder) SyncHighestDecided(ctx, logger, id, handler interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncHighestDecided", reflect.TypeOf((*MockSyncer)(nil).SyncHighestDecided), ctx, logger, id, handler) -} - -// MockNetwork is a mock of Network interface. -type MockNetwork struct { - ctrl *gomock.Controller - recorder *MockNetworkMockRecorder -} - -// MockNetworkMockRecorder is the mock recorder for MockNetwork. 
-type MockNetworkMockRecorder struct { - mock *MockNetwork -} - -// NewMockNetwork creates a new mock instance. -func NewMockNetwork(ctrl *gomock.Controller) *MockNetwork { - mock := &MockNetwork{ctrl: ctrl} - mock.recorder = &MockNetworkMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { - return m.recorder -} - -// GetHistory mocks base method. -func (m *MockNetwork) GetHistory(logger *zap.Logger, id types.MessageID, from, to qbft.Height, targets ...string) ([]protocolp2p.SyncResult, qbft.Height, error) { - m.ctrl.T.Helper() - varargs := []interface{}{logger, id, from, to} - for _, a := range targets { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetHistory", varargs...) - ret0, _ := ret[0].([]protocolp2p.SyncResult) - ret1, _ := ret[1].(qbft.Height) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetHistory indicates an expected call of GetHistory. -func (mr *MockNetworkMockRecorder) GetHistory(logger, id, from, to interface{}, targets ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{logger, id, from, to}, targets...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistory", reflect.TypeOf((*MockNetwork)(nil).GetHistory), varargs...) -} - -// LastDecided mocks base method. -func (m *MockNetwork) LastDecided(logger *zap.Logger, id types.MessageID) ([]protocolp2p.SyncResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastDecided", logger, id) - ret0, _ := ret[0].([]protocolp2p.SyncResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LastDecided indicates an expected call of LastDecided. 
-func (mr *MockNetworkMockRecorder) LastDecided(logger, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastDecided", reflect.TypeOf((*MockNetwork)(nil).LastDecided), logger, id) -} diff --git a/network/syncing/syncer.go b/network/syncing/syncer.go deleted file mode 100644 index db36a94028..0000000000 --- a/network/syncing/syncer.go +++ /dev/null @@ -1,207 +0,0 @@ -package syncing - -import ( - "context" - "time" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/logging/fields" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/utils/tasks" -) - -//go:generate mockgen -package=mocks -destination=./mocks/syncer.go -source=./syncer.go - -// MessageHandler reacts to a message received from Syncer. -type MessageHandler func(msg spectypes.SSVMessage) - -// Syncer handles the syncing of decided messages. -type Syncer interface { - SyncHighestDecided(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, handler MessageHandler) error - SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, - ) error -} - -// Network is a subset of protocolp2p.Syncer, required by Syncer to retrieve messages from peers. -type Network interface { - LastDecided(logger *zap.Logger, id spectypes.MessageID) ([]protocolp2p.SyncResult, error) - GetHistory( - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - targets ...string, - ) ([]protocolp2p.SyncResult, specqbft.Height, error) -} - -type syncer struct { - network Network -} - -// New returns a standard implementation of Syncer. 
-func New(network Network) Syncer { - return &syncer{ - network: network, - } -} - -func (s *syncer) SyncHighestDecided( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - handler MessageHandler, -) error { - if ctx.Err() != nil { - return ctx.Err() - } - - logger = logger.With( - zap.String("what", "SyncHighestDecided"), - fields.PubKey(id.GetPubKey()), - fields.Role(id.GetRoleType())) - - lastDecided, err := s.network.LastDecided(logger, id) - if err != nil { - logger.Debug("last decided sync failed", zap.Error(err)) - return errors.Wrap(err, "could not sync last decided") - } - if len(lastDecided) == 0 { - logger.Debug("no messages were synced") - return nil - } - - results := protocolp2p.SyncResults(lastDecided) - var maxHeight specqbft.Height - results.ForEachSignedMessage(func(m *specqbft.SignedMessage) (stop bool) { - if ctx.Err() != nil { - return true - } - if m.Message.Height > maxHeight { - maxHeight = m.Message.Height - } - raw, err := m.Encode() - if err != nil { - logger.Debug("could not encode signed message", zap.Error(err)) - return false - } - handler(spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: id, - Data: raw, - }) - return false - }) - logger.Debug("synced last decided", zap.Uint64("highest_height", uint64(maxHeight)), zap.Int("messages", len(lastDecided))) - return nil -} - -func (s *syncer) SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, -) error { - if ctx.Err() != nil { - return ctx.Err() - } - - logger = logger.With( - zap.String("what", "SyncDecidedByRange"), - fields.PubKey(id.GetPubKey()), - fields.Role(id.GetRoleType()), - zap.Uint64("from", uint64(from)), - zap.Uint64("to", uint64(to))) - logger.Debug("syncing decided by range") - - err := s.getDecidedByRange( - context.Background(), - logger, - id, - from, - to, - func(sm *specqbft.SignedMessage) error { - raw, err := sm.Encode() - if err 
!= nil { - logger.Debug("could not encode signed message", zap.Error(err)) - return nil - } - handler(spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: id, - Data: raw, - }) - return nil - }, - ) - if err != nil { - logger.Debug("sync failed", zap.Error(err)) - } - return err -} - -// getDecidedByRange calls GetHistory in batches to retrieve all decided messages in the given range. -func (s *syncer) getDecidedByRange( - ctx context.Context, - logger *zap.Logger, - mid spectypes.MessageID, - from, to specqbft.Height, - handler func(*specqbft.SignedMessage) error, -) error { - const maxRetries = 2 - - var ( - visited = make(map[specqbft.Height]struct{}) - msgs []protocolp2p.SyncResult - ) - - tail := from - var err error - for tail < to { - if ctx.Err() != nil { - return ctx.Err() - } - err := tasks.RetryWithContext(ctx, func() error { - start := time.Now() - msgs, tail, err = s.network.GetHistory(logger, mid, tail, to) - if err != nil { - return err - } - handled := 0 - protocolp2p.SyncResults(msgs).ForEachSignedMessage(func(m *specqbft.SignedMessage) (stop bool) { - if ctx.Err() != nil { - return true - } - if _, ok := visited[m.Message.Height]; ok { - return false - } - if err := handler(m); err != nil { - logger.Warn("could not handle signed message") - } - handled++ - visited[m.Message.Height] = struct{}{} - return false - }) - logger.Debug("received and processed history batch", - zap.Int64("tail", int64(tail)), - fields.Duration(start), - zap.Int("results_count", len(msgs)), - fields.SyncResults(msgs), - zap.Int("handled", handled)) - return nil - }, maxRetries) - if err != nil { - return err - } - } - - return nil -} diff --git a/network/syncing/syncer_test.go b/network/syncing/syncer_test.go deleted file mode 100644 index e0f99c3fb4..0000000000 --- a/network/syncing/syncer_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package syncing_test - -import ( - "context" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes 
"github.com/bloxapp/ssv-spec/types" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/network/syncing" -) - -type mockSyncer struct{} - -func (m *mockSyncer) SyncHighestDecided(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, handler syncing.MessageHandler) error { - return nil -} - -func (m *mockSyncer) SyncDecidedByRange(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, from specqbft.Height, to specqbft.Height, handler syncing.MessageHandler) error { - return nil -} - -type mockMessageHandler struct { - calls int - handler syncing.MessageHandler -} - -func newMockMessageHandler() *mockMessageHandler { - m := &mockMessageHandler{} - m.handler = func(msg spectypes.SSVMessage) { - m.calls++ - } - return m -} diff --git a/network/testing/local.go b/network/testing/local.go index d3610dddae..2c36b970d7 100644 --- a/network/testing/local.go +++ b/network/testing/local.go @@ -7,7 +7,7 @@ import ( ) // NetworkFactory is a generic factory for network instances -type NetworkFactory func(pctx context.Context, keys NodeKeys) network.P2PNetwork +type NetworkFactory func(pctx context.Context, nodeIndex int, keys NodeKeys) network.P2PNetwork // NewLocalTestnet creates a new local network func NewLocalTestnet(ctx context.Context, n int, factory NetworkFactory) ([]network.P2PNetwork, []NodeKeys, error) { @@ -18,7 +18,7 @@ func NewLocalTestnet(ctx context.Context, n int, factory NetworkFactory) ([]netw } for i, k := range keys { - nodes[i] = factory(ctx, k) + nodes[i] = factory(ctx, i, k) } return nodes, keys, nil diff --git a/network/topics/controller.go b/network/topics/controller.go index 3ac1dea7e6..8e58a34589 100644 --- a/network/topics/controller.go +++ b/network/topics/controller.go @@ -6,13 +6,13 @@ import ( "strconv" "time" - spectypes "github.com/bloxapp/ssv-spec/types" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" "go.uber.org/zap" "github.com/bloxapp/ssv/network/commons" + 
"github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) var ( @@ -37,7 +37,11 @@ type Controller interface { } // PubsubMessageHandler handles incoming messages -type PubsubMessageHandler func(string, *pubsub.Message) error +type PubsubMessageHandler func(context.Context, string, *pubsub.Message) error + +type messageValidator interface { + ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult +} // topicsCtrl implements Controller type topicsCtrl struct { @@ -45,25 +49,31 @@ type topicsCtrl struct { logger *zap.Logger // struct logger to implement i.Closer ps *pubsub.PubSub // scoreParamsFactory is a function that helps to set scoring params on topics - scoreParamsFactory func(string) *pubsub.TopicScoreParams - msgValidatorFactory func(string) MsgValidatorFunc - msgHandler PubsubMessageHandler - subFilter SubFilter + scoreParamsFactory func(string) *pubsub.TopicScoreParams + msgValidator messageValidator + msgHandler PubsubMessageHandler + subFilter SubFilter container *topicsContainer } // NewTopicsController creates an instance of Controller -func NewTopicsController(ctx context.Context, logger *zap.Logger, msgHandler PubsubMessageHandler, - msgValidatorFactory func(string) MsgValidatorFunc, subFilter SubFilter, pubSub *pubsub.PubSub, - scoreParams func(string) *pubsub.TopicScoreParams) Controller { +func NewTopicsController( + ctx context.Context, + logger *zap.Logger, + msgHandler PubsubMessageHandler, + msgValidator messageValidator, + subFilter SubFilter, + pubSub *pubsub.PubSub, + scoreParams func(string) *pubsub.TopicScoreParams, +) Controller { ctrl := &topicsCtrl{ - ctx: ctx, - logger: logger, - ps: pubSub, - scoreParamsFactory: scoreParams, - msgValidatorFactory: msgValidatorFactory, - msgHandler: msgHandler, + ctx: ctx, + logger: logger, + ps: pubSub, + scoreParamsFactory: scoreParams, + msgValidator: msgValidator, + msgHandler: msgHandler, subFilter: subFilter, } @@ -171,7 +181,7 @@ func (ctrl 
*topicsCtrl) Broadcast(name string, data []byte, timeout time.Duratio func (ctrl *topicsCtrl) Unsubscribe(logger *zap.Logger, name string, hard bool) error { ctrl.container.Unsubscribe(name) - if ctrl.msgValidatorFactory != nil { + if ctrl.msgValidator != nil { err := ctrl.ps.UnregisterTopicValidator(name) if err != nil { logger.Debug("could not unregister msg validator", zap.String("topic", name), zap.Error(err)) @@ -207,7 +217,9 @@ func (ctrl *topicsCtrl) start(logger *zap.Logger, name string, sub *pubsub.Subsc func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) error { ctx, cancel := context.WithCancel(ctrl.ctx) defer cancel() + topicName := sub.Topic() + logger = logger.With(zap.String("topic", topicName)) logger.Debug("start listening to topic") for ctx.Err() == nil { @@ -228,14 +240,14 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err continue } - if ssvMsg, ok := msg.ValidatorData.(spectypes.SSVMessage); ok { + if ssvMsg, ok := msg.ValidatorData.(*queue.DecodedSSVMessage); ok { metricPubsubInbound.WithLabelValues( commons.GetTopicBaseName(topicName), strconv.FormatUint(uint64(ssvMsg.MsgType), 10), ).Inc() } - if err := ctrl.msgHandler(topicName, msg); err != nil { + if err := ctrl.msgHandler(ctx, topicName, msg); err != nil { logger.Debug("could not handle msg", zap.Error(err)) } } @@ -244,7 +256,7 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err // setupTopicValidator registers the topic validator func (ctrl *topicsCtrl) setupTopicValidator(name string) error { - if ctrl.msgValidatorFactory != nil { + if ctrl.msgValidator != nil { // first try to unregister in case there is already a msg validator for that topic (e.g. 
fork scenario) _ = ctrl.ps.UnregisterTopicValidator(name) @@ -252,7 +264,7 @@ func (ctrl *topicsCtrl) setupTopicValidator(name string) error { // Optional: set a timeout for message validation // opts = append(opts, pubsub.WithValidatorTimeout(time.Second)) - err := ctrl.ps.RegisterTopicValidator(name, ctrl.msgValidatorFactory(name), opts...) + err := ctrl.ps.RegisterTopicValidator(name, ctrl.msgValidator.ValidatorForTopic(name), opts...) if err != nil { return errors.Wrap(err, "could not register topic validator") } diff --git a/network/topics/controller_test.go b/network/topics/controller_test.go index bc1e028cc4..c48ff4564d 100644 --- a/network/topics/controller_test.go +++ b/network/topics/controller_test.go @@ -2,61 +2,94 @@ package topics import ( "context" + "encoding/base64" "encoding/hex" - "fmt" + "encoding/json" + "math" "sync" "sync/atomic" "testing" "time" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/network/commons" - - "github.com/bloxapp/ssv/protocol/v2/types" - "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" + "github.com/bloxapp/ssv/networkconfig" ) func TestTopicManager(t *testing.T) { logger := logging.TestLogger(t) - nPeers := 4 - - pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", - "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", - "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", - 
"a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", - "a1169bd8407279d9e56b8cefafa37449afd6751f94d1da6bc8145b96d7ad2940184d506971291cd55ae152f9fc65b146", - "80ff2cfb8fd80ceafbb3c331f271a9f9ce0ed3e360087e314d0a8775e86fa7cd19c999b821372ab6419cde376e032ff6", - "a01909aac48337bab37c0dba395fb7495b600a53c58059a251d00b4160b9da74c62f9c4e9671125c59932e7bb864fd3d", - "a4fc8c859ed5c10d7a1ff9fb111b76df3f2e0a6cbe7d0c58d3c98973c0ff160978bc9754a964b24929fff486ebccb629"} - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - peers := newPeers(ctx, logger, t, nPeers, false, true) - baseTest(t, ctx, logger, peers, pks, 1, 2) + + t.Run("happy flow", func(t *testing.T) { + nPeers := 4 + + pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", + "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", + "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", + "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", + "a1169bd8407279d9e56b8cefafa37449afd6751f94d1da6bc8145b96d7ad2940184d506971291cd55ae152f9fc65b146", + "80ff2cfb8fd80ceafbb3c331f271a9f9ce0ed3e360087e314d0a8775e86fa7cd19c999b821372ab6419cde376e032ff6", + "a01909aac48337bab37c0dba395fb7495b600a53c58059a251d00b4160b9da74c62f9c4e9671125c59932e7bb864fd3d", + "a4fc8c859ed5c10d7a1ff9fb111b76df3f2e0a6cbe7d0c58d3c98973c0ff160978bc9754a964b24929fff486ebccb629"} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validator := validation.NewMessageValidator(networkconfig.TestNetwork) + + peers := newPeers(ctx, logger, t, nPeers, validator, true, nil) + baseTest(t, ctx, logger, peers, pks, 1, 2) + }) + + t.Run("banning peer", func(t *testing.T) { + t.Skip() // TODO: finish the test + + pks := []string{ + 
"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", + "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", + "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", + "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validator := validation.NewMessageValidator(networkconfig.TestNetwork) + + scoreMap := map[peer.ID]*pubsub.PeerScoreSnapshot{} + var scoreMapMu sync.Mutex + + scoreInspector := func(m map[peer.ID]*pubsub.PeerScoreSnapshot) { + b, _ := json.Marshal(m) + t.Logf("peer scores: %v", string(b)) + + scoreMapMu.Lock() + defer scoreMapMu.Unlock() + + scoreMap = m + } + + const nPeers = 4 + peers := newPeers(ctx, logger, t, nPeers, validator, true, scoreInspector) + banningTest(t, ctx, logger, peers, pks, scoreMap, &scoreMapMu) + }) } func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, pks []string, minMsgCount, maxMsgCount int) { nValidators := len(pks) // nPeers := len(peers) - validatorTopic := func(pkhex string) string { - pk, err := hex.DecodeString(pkhex) - if err != nil { - return "invalid" - } - return commons.ValidatorTopicID(pk)[0] - } - t.Log("subscribing to topics") // listen to topics for _, pk := range pks { @@ -85,7 +118,7 @@ func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, wg.Add(1) go func(p *P, pk string, pi int) { defer wg.Done() - msg, err := dummyMsg(pk, pi%4) + msg, err := dummyMsg(pk, pi%4, false) require.NoError(t, err) raw, err := msg.Encode() require.NoError(t, err) @@ -146,6 +179,109 @@ func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, wg.Wait() } +func banningTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, pks []string, scoreMap 
map[peer.ID]*pubsub.PeerScoreSnapshot, scoreMapMu *sync.Mutex) { + t.Log("subscribing to topics") + + for _, pk := range pks { + for _, p := range peers { + require.NoError(t, p.tm.Subscribe(logger, validatorTopic(pk))) + } + } + + // wait for the peers to join topics + <-time.After(3 * time.Second) + + t.Log("checking initial scores") + for _, pk := range pks { + for _, p := range peers { + peerList, err := p.tm.Peers(pk) + require.NoError(t, err) + + for _, pid := range peerList { + scoreMapMu.Lock() + v, ok := scoreMap[pid] + scoreMapMu.Unlock() + + require.True(t, ok) + require.Equal(t, 0, v.Score) + } + } + } + + t.Log("broadcasting messages") + + const invalidMessagesCount = 10 + + // TODO: get current default score, send an invalid rejected message, check the score; then run 10 of them and check the score; then check valid message + + invalidMessages, err := msgSequence(pks[0], invalidMessagesCount, len(pks), true) + require.NoError(t, err) + + var wg sync.WaitGroup + // publish some messages + for i, msg := range invalidMessages { + wg.Add(1) + go func(p *P, pk string, msg *spectypes.SSVMessage) { + defer wg.Done() + + raw, err := msg.Encode() + require.NoError(t, err) + + require.NoError(t, p.tm.Broadcast(validatorTopic(pk), raw, time.Second*10)) + + <-time.After(time.Second * 5) + }(peers[0], pks[i%len(pks)], msg) + } + wg.Wait() + + <-time.After(5 * time.Second) + + t.Log("checking final scores") + for _, pk := range pks { + for _, p := range peers { + peerList, err := p.tm.Peers(pk) + require.NoError(t, err) + + for _, pid := range peerList { + scoreMapMu.Lock() + v, ok := scoreMap[pid] + scoreMapMu.Unlock() + + require.True(t, ok) + require.Equal(t, 0, v.Score) // TODO: score should change + } + } + } + + //t.Log("unsubscribing") + //// unsubscribing multiple times for each topic + //wg.Add(1) + //go func(p *P, pk string) { + // defer wg.Done() + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // go func(p *P) { + // 
<-time.After(time.Millisecond) + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // }(p) + // wg.Add(1) + // go func(p *P) { + // defer wg.Done() + // <-time.After(time.Millisecond * 50) + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // }(p) + //}(peer, pk) + // + //wg.Wait() +} + +func validatorTopic(pkhex string) string { + pk, err := hex.DecodeString(pkhex) + if err != nil { + return "invalid" + } + return commons.ValidatorTopicID(pk)[0] +} + type P struct { host host.Host ps *pubsub.PubSub @@ -181,10 +317,10 @@ func (p *P) saveMsg(t string, msg *pubsub.Message) { } // TODO: use p2p/testing -func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgValidator, msgID bool) []*P { +func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgValidator validation.MessageValidator, msgID bool, scoreInspector pubsub.ExtendedPeerScoreInspectFn) []*P { peers := make([]*P, n) for i := 0; i < n; i++ { - peers[i] = newPeer(ctx, logger, t, msgValidator, msgID) + peers[i] = newPeer(ctx, logger, t, msgValidator, msgID, scoreInspector) } t.Logf("%d peers were created", n) th := uint64(n/2) + uint64(n/4) @@ -203,7 +339,7 @@ func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgV return peers } -func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator, msgID bool) *P { +func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator validation.MessageValidator, msgID bool, scoreInspector pubsub.ExtendedPeerScoreInspectFn) *P { h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0")) require.NoError(t, err) ds, err := discovery.NewLocalDiscovery(ctx, logger, h) @@ -212,14 +348,14 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator var p *P var midHandler MsgIDHandler if msgID { - midHandler = NewMsgIDHandler(ctx, 2*time.Minute) + midHandler = NewMsgIDHandler(ctx, 
2*time.Minute, networkconfig.TestNetwork) go midHandler.Start() } - cfg := &PububConfig{ + cfg := &PubSubConfig{ Host: h, TraceLog: false, MsgIDHandler: midHandler, - MsgHandler: func(topic string, msg *pubsub.Message) error { + MsgHandler: func(_ context.Context, topic string, msg *pubsub.Message) error { p.saveMsg(topic, msg) return nil }, @@ -228,15 +364,13 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator IPColocationWeight: 0, OneEpochDuration: time.Minute, }, + MsgValidator: msgValidator, + ScoreInspector: scoreInspector, + ScoreInspectorInterval: 100 * time.Millisecond, // TODO: add mock for peers.ScoreIndex } - // - if msgValidator { - cfg.MsgValidatorFactory = func(s string) MsgValidatorFunc { - return NewSSVMsgValidator() - } - } - ps, tm, err := NewPubsub(ctx, logger, cfg) + + ps, tm, err := NewPubSub(ctx, logger, cfg) require.NoError(t, err) p = &P{ @@ -258,28 +392,63 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator return p } -func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { +func msgSequence(pkHex string, n, committeeSize int, malformed bool) ([]*spectypes.SSVMessage, error) { + var messages []*spectypes.SSVMessage + + for i := 0; i < n; i++ { + height := i * committeeSize + msg, err := dummyMsg(pkHex, height, malformed) + if err != nil { + return nil, err + } + + messages = append(messages, msg) + } + + return messages, nil +} + +func dummyMsg(pkHex string, height int, malformed bool) (*spectypes.SSVMessage, error) { pk, err := hex.DecodeString(pkHex) if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) - msgData := fmt.Sprintf(`{ - "message": { - "type": 3, - "round": 2, - "identifier": "%s", - "height": %d, - "value": "bk0iAAAAAAACAAAAAAAAAAbYXFSt2H7SQd5q5u+N0bp6PbbPTQjU25H1QnkbzTECahIBAAAAAADmi+NJfvXZ3iXp2cfs0vYVW+EgGD7DTTvr5EkLtiWq8WsSAQAAAAAAIC8dZTEdD3EvE38B9kDVWkSLy40j0T+TtSrrrBqVjo4=" - }, 
- "signature": "sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN", - "signer_ids": [1,3,4] - }`, id, height) - return &spectypes.SSVMessage{ + + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + signature, err := base64.StdEncoding.DecodeString("sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN") + if err != nil { + return nil, err + } + + signedMessage := specqbft.SignedMessage{ + Signature: signature, + Signers: []spectypes.OperatorID{1, 3, 4}, + Message: specqbft.Message{ + MsgType: specqbft.RoundChangeMsgType, + Height: specqbft.Height(height), + Round: 2, + Identifier: id[:], + Root: [32]byte{}, + }, + FullData: nil, + } + + msgData, err := signedMessage.Encode() + if err != nil { + return nil, err + } + + ssvMsg := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, - Data: []byte(msgData), - }, nil + Data: msgData, + } + + if malformed { + ssvMsg.MsgType = math.MaxUint64 + } + + return ssvMsg, nil } // diff --git a/network/topics/metrics.go b/network/topics/metrics.go index 53c651967e..7df570090a 100644 --- a/network/topics/metrics.go +++ b/network/topics/metrics.go @@ -6,15 +6,12 @@ import ( "go.uber.org/zap" ) +// TODO: replace with new metrics var ( metricPubsubTrace = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:network:pubsub:trace", Help: "Traces of pubsub messages", }, []string{"type"}) - metricPubsubMsgValidationResults = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "ssv:network:pubsub:msg:validation", - Help: "Traces of pubsub message validation results", - }, []string{"type"}) metricPubsubOutbound = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:p2p:pubsub:msg:out", Help: "Count broadcasted messages", @@ -23,10 +20,6 @@ var ( Name: "ssv:p2p:pubsub:msg:in", Help: "Count incoming messages", }, 
[]string{"topic", "msg_type"}) - metricPubsubActiveMsgValidation = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ssv:p2p:pubsub:msg:val:active", - Help: "Count active message validation", - }, []string{"topic"}) metricPubsubPeerScoreInspect = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "ssv:p2p:pubsub:score:inspect", Help: "Gauge for negative peer scores", @@ -38,30 +31,13 @@ func init() { if err := prometheus.Register(metricPubsubTrace); err != nil { logger.Debug("could not register prometheus collector") } - if err := prometheus.Register(metricPubsubMsgValidationResults); err != nil { - logger.Debug("could not register prometheus collector") - } if err := prometheus.Register(metricPubsubOutbound); err != nil { logger.Debug("could not register prometheus collector") } if err := prometheus.Register(metricPubsubInbound); err != nil { logger.Debug("could not register prometheus collector") } - if err := prometheus.Register(metricPubsubActiveMsgValidation); err != nil { - logger.Debug("could not register prometheus collector") - } if err := prometheus.Register(metricPubsubPeerScoreInspect); err != nil { logger.Debug("could not register prometheus collector") } } - -type msgValidationResult string - -var ( - validationResultNoData msgValidationResult = "no_data" - validationResultEncoding msgValidationResult = "encoding" -) - -func reportValidationResult(result msgValidationResult) { - metricPubsubMsgValidationResults.WithLabelValues(string(result)).Inc() -} diff --git a/network/topics/msg_id.go b/network/topics/msg_id.go index 92af89b1d0..e7cff81b2e 100644 --- a/network/topics/msg_id.go +++ b/network/topics/msg_id.go @@ -8,6 +8,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" ps_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" @@ -53,21 +54,23 @@ type msgIDEntry struct { // msgIDHandler implements MsgIDHandler type msgIDHandler struct { - 
ctx context.Context - added chan addedEvent - ids map[string]*msgIDEntry - locker sync.Locker - ttl time.Duration + ctx context.Context + added chan addedEvent + ids map[string]*msgIDEntry + locker sync.Locker + ttl time.Duration + networkConfig networkconfig.NetworkConfig } // NewMsgIDHandler creates a new MsgIDHandler -func NewMsgIDHandler(ctx context.Context, ttl time.Duration) MsgIDHandler { +func NewMsgIDHandler(ctx context.Context, ttl time.Duration, networkConfig networkconfig.NetworkConfig) MsgIDHandler { handler := &msgIDHandler{ - ctx: ctx, - added: make(chan addedEvent, msgIDHandlerBufferSize), - ids: make(map[string]*msgIDEntry), - locker: &sync.Mutex{}, - ttl: ttl, + ctx: ctx, + added: make(chan addedEvent, msgIDHandlerBufferSize), + ids: make(map[string]*msgIDEntry), + locker: &sync.Mutex{}, + ttl: ttl, + networkConfig: networkConfig, } return handler } @@ -96,31 +99,51 @@ func (handler *msgIDHandler) MsgID(logger *zap.Logger) func(pmsg *ps_pb.Message) if pmsg == nil { return MsgIDEmptyMessage } - //logger := logger.With() - if len(pmsg.GetData()) == 0 { + + messageData := pmsg.GetData() + if len(messageData) == 0 { return MsgIDEmptyMessage } + pid, err := peer.IDFromBytes(pmsg.GetFrom()) if err != nil { return MsgIDBadPeerID } - mid := commons.MsgID()(pmsg.GetData()) + + mid := handler.pubsubMsgToMsgID(messageData) + if len(mid) == 0 { logger.Debug("could not create msg_id", zap.ByteString("seq_no", pmsg.GetSeqno()), fields.PeerID(pid)) return MsgIDError } + handler.Add(mid, pid) return mid } } +func (handler *msgIDHandler) pubsubMsgToMsgID(msg []byte) string { + currentEpoch := handler.networkConfig.Beacon.EstimatedCurrentEpoch() + if currentEpoch > handler.networkConfig.PermissionlessActivationEpoch { + decodedMsg, _, _, err := commons.DecodeSignedSSVMessage(msg) + if err != nil { + // todo: should err here or just log and let the decode function err? 
+ } else { + return commons.MsgID()(decodedMsg) + } + } + return commons.MsgID()(msg) +} + // GetPeers returns the peers that are related to the given msg func (handler *msgIDHandler) GetPeers(msg []byte) []peer.ID { - msgID := commons.MsgID()(msg) + msgID := handler.pubsubMsgToMsgID(msg) + handler.locker.Lock() defer handler.locker.Unlock() + entry, ok := handler.ids[msgID] if ok { if !entry.t.Add(handler.ttl).After(time.Now()) { diff --git a/network/topics/msg_validator.go b/network/topics/msg_validator.go deleted file mode 100644 index f1329fa698..0000000000 --- a/network/topics/msg_validator.go +++ /dev/null @@ -1,67 +0,0 @@ -package topics - -import ( - "context" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/bloxapp/ssv/network/commons" -) - -// MsgValidatorFunc represents a message validator -type MsgValidatorFunc = func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult - -// NewSSVMsgValidator creates a new msg validator that validates message structure, -// and checks that the message was sent on the right topic. -// TODO: enable post SSZ change, remove logs, break into smaller validators? 
-func NewSSVMsgValidator() func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - return func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { - topic := pmsg.GetTopic() - metricPubsubActiveMsgValidation.WithLabelValues(topic).Inc() - defer metricPubsubActiveMsgValidation.WithLabelValues(topic).Dec() - if len(pmsg.GetData()) == 0 { - reportValidationResult(validationResultNoData) - return pubsub.ValidationReject - } - msg, err := commons.DecodeNetworkMsg(pmsg.GetData()) - if err != nil { - // can't decode message - // logger.Debug("invalid: can't decode message", zap.Error(err)) - reportValidationResult(validationResultEncoding) - return pubsub.ValidationReject - } - if msg == nil { - reportValidationResult(validationResultEncoding) - return pubsub.ValidationReject - } - pmsg.ValidatorData = *msg - return pubsub.ValidationAccept - - // Check if the message was sent on the right topic. - // currentTopic := pmsg.GetTopic() - // currentTopicBaseName := fork.GetTopicBaseName(currentTopic) - // topics := fork.ValidatorTopicID(msg.GetID().GetPubKey()) - // for _, tp := range topics { - // if tp == currentTopicBaseName { - // reportValidationResult(validationResultValid) - // return pubsub.ValidationAccept - // } - //} - // reportValidationResult(validationResultTopic) - // return pubsub.ValidationReject - } -} - -//// CombineMsgValidators executes multiple validators -// func CombineMsgValidators(validators ...MsgValidatorFunc) MsgValidatorFunc { -// return func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { -// res := pubsub.ValidationAccept -// for _, v := range validators { -// if res = v(ctx, p, msg); res == pubsub.ValidationReject { -// break -// } -// } -// return res -// } -//} diff --git a/network/topics/msg_validator_test.go b/network/topics/msg_validator_test.go index 3a4f6b2081..dc24227ccd 100644 --- a/network/topics/msg_validator_test.go +++ 
b/network/topics/msg_validator_test.go @@ -2,44 +2,69 @@ package topics import ( "context" - "encoding/hex" - "fmt" "testing" + v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/herumi/bls-eth-go-binary/bls" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" pubsub "github.com/libp2p/go-libp2p-pubsub" ps_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network/commons" - "github.com/bloxapp/ssv/protocol/v2/types" - "github.com/bloxapp/ssv/utils/threshold" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/storage" + beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" ) func TestMsgValidator(t *testing.T) { - pks := createSharePublicKeys(4) - mv := NewSSVMsgValidator() + logger := zaptest.NewLogger(t) + db, err := kv.NewInMemory(logger, basedb.Options{}) + require.NoError(t, err) + + ns, err := storage.NewNodeStorage(logger, db) + require.NoError(t, err) + + ks := spectestingutils.Testing4SharesSet() + share := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: v1.ValidatorStateActiveOngoing, + }, + Liquidated: false, + }, + } + require.NoError(t, ns.Shares().Save(nil, share)) + + mv := validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithNodeStorage(ns)) require.NotNil(t, mv) + slot := networkconfig.TestNetwork.Beacon.GetBeaconNetwork().EstimatedCurrentSlot() + t.Run("valid consensus msg", func(t *testing.T) { - pkHex := pks[0] - msg, err := dummySSVConsensusMsg(pkHex, 15160) + msg, err := 
dummySSVConsensusMsg(share.ValidatorPubKey, qbft.Height(slot)) require.NoError(t, err) + raw, err := msg.Encode() require.NoError(t, err) - pk, err := hex.DecodeString(pkHex) - require.NoError(t, err) - topics := commons.ValidatorTopicID(pk) + + topics := commons.ValidatorTopicID(share.ValidatorPubKey) pmsg := newPBMsg(raw, commons.GetTopicFullName(topics[0]), []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r")) - res := mv(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) - require.Equal(t, res, pubsub.ValidationAccept) + res := mv.ValidatePubsubMessage(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) + require.Equal(t, pubsub.ValidationAccept, res) }) // TODO: enable once topic validation is in place - // t.Run("wrong topic", func(t *testing.T) { + //t.Run("wrong topic", func(t *testing.T) { // pkHex := "b5de683dbcb3febe8320cc741948b9282d59b75a6970ed55d6f389da59f26325331b7ea0e71a2552373d0debb6048b8a" - // msg, err := dummySSVConsensusMsg(pkHex, 15160) + // msg, err := dummySSVConsensusMsg(share.ValidatorPubKey, 15160) // require.NoError(t, err) // raw, err := msg.Encode() // require.NoError(t, err) @@ -47,40 +72,26 @@ func TestMsgValidator(t *testing.T) { // require.NoError(t, err) // topics := commons.ValidatorTopicID(pk) // pmsg := newPBMsg(raw, topics[0], []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r")) - // res := mv(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) + // res := mv.ValidateP2PMessage(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) // require.Equal(t, res, pubsub.ValidationReject) - // }) + //}) t.Run("empty message", func(t *testing.T) { pmsg := newPBMsg([]byte{}, "xxx", []byte{}) - res := mv(context.Background(), "xxxx", pmsg) - require.Equal(t, res, pubsub.ValidationReject) + res := mv.ValidatePubsubMessage(context.Background(), "xxxx", pmsg) + require.Equal(t, 
pubsub.ValidationReject, res) }) // TODO: enable once topic validation is in place - // t.Run("invalid validator public key", func(t *testing.T) { + //t.Run("invalid validator public key", func(t *testing.T) { // msg, err := dummySSVConsensusMsg("10101011", 1) // require.NoError(t, err) // raw, err := msg.Encode() // require.NoError(t, err) // pmsg := newPBMsg(raw, "xxx", []byte{}) - // res := mv(context.Background(), "xxxx", pmsg) + // res := mv.ValidateP2PMessage(context.Background(), "xxxx", pmsg) // require.Equal(t, res, pubsub.ValidationReject) - // }) - -} - -func createSharePublicKeys(n int) []string { - threshold.Init() - - var res []string - for i := 0; i < n; i++ { - sk := bls.SecretKey{} - sk.SetByCSPRNG() - pk := sk.GetPublicKey().SerializeToHexStr() - res = append(res, pk) - } - return res + //}) } func newPBMsg(data []byte, topic string, from []byte) *pubsub.Message { @@ -93,26 +104,19 @@ func newPBMsg(data []byte, topic string, from []byte) *pubsub.Message { return pmsg } -func dummySSVConsensusMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { - pk, err := hex.DecodeString(pkHex) +func dummySSVConsensusMsg(pk spectypes.ValidatorPK, height qbft.Height) (*spectypes.SSVMessage, error) { + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + ks := spectestingutils.Testing4SharesSet() + validSignedMessage := spectestingutils.TestingRoundChangeMessageWithHeightAndIdentifier(ks.Shares[1], 1, height, id[:]) + + encodedSignedMessage, err := validSignedMessage.Encode() if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) - msgData := fmt.Sprintf(`{ - "message": { - "type": 3, - "round": 2, - "identifier": "%s", - "height": %d, - "value": "bk0iAAAAAAACAAAAAAAAAAbYXFSt2H7SQd5q5u+N0bp6PbbPTQjU25H1QnkbzTECahIBAAAAAADmi+NJfvXZ3iXp2cfs0vYVW+EgGD7DTTvr5EkLtiWq8WsSAQAAAAAAIC8dZTEdD3EvE38B9kDVWkSLy40j0T+TtSrrrBqVjo4=" - }, - "signature": 
"sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN", - "signer_ids": [1,3,4] - }`, id, height) + return &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, - Data: []byte(msgData), + Data: encodedSignedMessage, }, nil } diff --git a/network/topics/params/gossipsub.go b/network/topics/params/gossipsub.go index 5e7945768d..c7d51ba8a1 100644 --- a/network/topics/params/gossipsub.go +++ b/network/topics/params/gossipsub.go @@ -6,7 +6,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" ) -var ( +const ( // gsD topic stable mesh target count gsD = 8 // gsDlo topic stable mesh low watermark diff --git a/network/topics/params/helpers.go b/network/topics/params/helpers.go index 45e468ab63..cbbd47f502 100644 --- a/network/topics/params/helpers.go +++ b/network/topics/params/helpers.go @@ -7,10 +7,9 @@ import ( "github.com/pkg/errors" ) -// scoreByWeight provides the relevant score by the provided weight and threshold. -func scoreByWeight(maxScore float64, weight, threshold float64) float64 { - return maxScore / (weight * threshold * threshold) -} +const ( + oneEpochDuration = (12 * time.Second) * 32 +) // scoreDecay determines the decay rate from the provided time period till // the decayToZero value. Ex: ( 1 -> 0.01) @@ -19,11 +18,6 @@ func scoreDecay(totalDecayDuration time.Duration, decayIntervalDuration time.Dur return math.Pow(decayToZero, 1/ticks) } -// the cap for `inMesh` time scoring. -func inMeshCap(inMeshTime time.Duration) float64 { - return float64((3600 * time.Second) / inMeshTime) -} - // decayThreshold is used to determine the threshold from the decay limit with // a provided growth rate. This applies the decay rate to a // computed limit. 
diff --git a/network/topics/params/peer_score.go b/network/topics/params/peer_score.go index 91492ccb1b..a1af18d988 100644 --- a/network/topics/params/peer_score.go +++ b/network/topics/params/peer_score.go @@ -9,62 +9,77 @@ import ( ) const ( - gossipThreshold = -4000 - defaultIPColocationThreshold = 10 // TODO: check a lower value such as in ETH (3) + // Thresholds + gossipThreshold = -4000 + publishThreshold = -8000 + graylistThreshold = -16000 + acceptPXThreshold = 100 + opportunisticGraftThreshold = 5 + + // Overall parameters + topicScoreCap = 32.72 + decayInterval = 32 * (time.Second * 12) // One epoch + decayToZero = 0.01 + retainScore = 100 * 32 * 12 * time.Second + + // P5 + appSpecificWeight = 0 + + // P6 + ipColocationFactorThreshold = 10 + ipColocationFactorWeight = -topicScoreCap + + // P7 + behaviourPenaltyThreshold = 6 ) // PeerScoreThresholds returns the thresholds to use for peer scoring func PeerScoreThresholds() *pubsub.PeerScoreThresholds { return &pubsub.PeerScoreThresholds{ GossipThreshold: gossipThreshold, - PublishThreshold: -8000, - GraylistThreshold: -16000, - AcceptPXThreshold: 100, - OpportunisticGraftThreshold: 5, + PublishThreshold: publishThreshold, + GraylistThreshold: graylistThreshold, + AcceptPXThreshold: acceptPXThreshold, + OpportunisticGraftThreshold: opportunisticGraftThreshold, } } // PeerScoreParams returns peer score params according to the given options -func PeerScoreParams(oneEpoch, msgIDCacheTTL time.Duration, ipColocationWeight float64, ipColocationThreshold int, ipWhilelist ...*net.IPNet) *pubsub.PeerScoreParams { +func PeerScoreParams(oneEpoch, msgIDCacheTTL time.Duration, ipWhilelist ...*net.IPNet) *pubsub.PeerScoreParams { if oneEpoch == 0 { oneEpoch = oneEpochDuration } - maxPositiveScore := (maxInMeshScore + maxFirstDeliveryScore) * (subnetTopicsWeight) - topicScoreCap := maxPositiveScore / 4.0 // ETH divides by 2, we use lower value to reduce cap - - behaviourPenaltyThreshold := 10.0 // using a larger threshold 
than ETH (6) to reduce the effect of behavioural penalty - behaviourPenaltyDecay := scoreDecay(oneEpoch*10, oneEpoch) - // TODO: rate (10.0) should be injected to this function - targetVal, _ := decayConvergence(behaviourPenaltyDecay, 10.0/slotsPerEpoch) + // P7 calculation + behaviourPenaltyDecay := scoreDecay(oneEpoch*10, decayInterval) + maxAllowedRatePerDecayInterval := 10.0 + targetVal, _ := decayConvergence(behaviourPenaltyDecay, maxAllowedRatePerDecayInterval) targetVal = targetVal - behaviourPenaltyThreshold behaviourPenaltyWeight := gossipThreshold / (targetVal * targetVal) - if ipColocationWeight == 0 { - ipColocationWeight = -topicScoreCap - } - if ipColocationThreshold == 0 { - ipColocationThreshold = defaultIPColocationThreshold - } return &pubsub.PeerScoreParams{ - Topics: make(map[string]*pubsub.TopicScoreParams), + Topics: make(map[string]*pubsub.TopicScoreParams), + // Overall parameters TopicScoreCap: topicScoreCap, + DecayInterval: decayInterval, + DecayToZero: decayToZero, + RetainScore: retainScore, + SeenMsgTTL: msgIDCacheTTL, + + // P5 AppSpecificScore: func(p peer.ID) float64 { - // TODO: implement return 0 }, - AppSpecificWeight: 1, - IPColocationFactorWeight: ipColocationWeight, - IPColocationFactorThreshold: ipColocationThreshold, + AppSpecificWeight: appSpecificWeight, + + // P6 + IPColocationFactorWeight: ipColocationFactorWeight, + IPColocationFactorThreshold: ipColocationFactorThreshold, IPColocationFactorWhitelist: ipWhilelist, - SeenMsgTTL: msgIDCacheTTL, - BehaviourPenaltyWeight: behaviourPenaltyWeight, - BehaviourPenaltyThreshold: behaviourPenaltyThreshold, - BehaviourPenaltyDecay: behaviourPenaltyDecay, - DecayInterval: oneEpoch, - DecayToZero: decayToZero, - // RetainScore is the time to remember counters for a disconnected peer - // TODO: ETH uses 100 epoch, we reduced it to 10 until scoring will be more mature - RetainScore: oneEpoch * 10, + + // P7 + BehaviourPenaltyWeight: behaviourPenaltyWeight, + BehaviourPenaltyThreshold: 
behaviourPenaltyThreshold, + BehaviourPenaltyDecay: behaviourPenaltyDecay, } } diff --git a/network/topics/params/scores_test.go b/network/topics/params/scores_test.go index 003dc49792..04ba25895f 100644 --- a/network/topics/params/scores_test.go +++ b/network/topics/params/scores_test.go @@ -63,7 +63,7 @@ func TestTopicScoreParams(t *testing.T) { } func TestPeerScoreParams(t *testing.T) { - peerScoreParams := PeerScoreParams(oneEpochDuration, 550*(time.Millisecond*700), 0, 0) + peerScoreParams := PeerScoreParams(oneEpochDuration, 550*(time.Millisecond*700)) raw, err := peerScoreParamsString(peerScoreParams) require.NoError(t, err) require.NotNil(t, raw) diff --git a/network/topics/params/topic_score.go b/network/topics/params/topic_score.go index b7b19fc8ef..6bab61eacc 100644 --- a/network/topics/params/topic_score.go +++ b/network/topics/params/topic_score.go @@ -9,24 +9,32 @@ import ( ) const ( - gossipSubD = 8 - oneEpochDuration = (12 * time.Second) * 32 - slotsPerEpoch = 32 - // maxInMeshScore describes the max score a peer can attain from being in the mesh - maxInMeshScore = 10 - // maxFirstDeliveryScore describes the max score a peer can obtain from first deliveries - maxFirstDeliveryScore = 40 - // decayToZero specifies the terminal value that we will use when decaying a value. - decayToZero = 0.01 - // dampeningFactor reduces the amount by which the various thresholds and caps are created. - // using value of 50 (prysm changed to 90) - dampeningFactor = 50 - - subnetTopicsWeight = 4.0 -) - -const ( + // Network Topology + gossipSubD = 8 minActiveValidators = 200 + + // Overall parameters + totalTopicsWeight = 4.0 + + // P1 + maxTimeInMeshScore = 10 // max score a peer can attain from being in the mesh + timeInMeshQuantum = 12 + timeInMeshQuantumCap = 3600 + + // P2 + firstDeliveryDecayEpochs = time.Duration(4) + maxFirstDeliveryScore = 80 // max score a peer can obtain from first deliveries + + // P3 + // Mesh scording is disabled for now. 
+ meshDeliveryDecayEpochs = time.Duration(16) + meshDeliveryDampeningFactor = 1.0 / 50.0 + meshDeliveryCapFactor = 16 + meshScoringEnabled = false + + // P4 + invalidMessageDecayEpochs = time.Duration(100) + maxInvalidMessagesAllowed = 20 ) var ( @@ -41,8 +49,6 @@ type NetworkOpts struct { ActiveValidators int // Subnets is the number of subnets in the network Subnets int - //// Groups is the amount of groups used in the network - // Groups int // OneEpochDuration is used as a time-frame length to control scoring in a dynamic way OneEpochDuration time.Duration // TotalTopicsWeight is the weight of all the topics in the network @@ -51,17 +57,33 @@ type NetworkOpts struct { // TopicOpts is the config struct for topic configurations type TopicOpts struct { - // TopicWeight is the weight of the topic - TopicWeight float64 - // ExpectedMsgRate is the expected rate for the topic - ExpectedMsgRate float64 - InvalidMsgDecayTime time.Duration - FirstMsgDecayTime time.Duration - MeshMsgDecayTime time.Duration - MeshMsgCapFactor float64 - MeshMsgActivationTime time.Duration // D is the gossip degree D int + + // ExpectedMsgRate is the expected rate for the topic + ExpectedMsgRate float64 + + // TopicWeight is the weight of the topic + TopicWeight float64 + + // P1 + MaxTimeInMeshScore float64 + TimeInMeshQuantum int + TimeInMeshQuantumCap int + + // P2 + FirstDeliveryDecayEpochs time.Duration + MaxFirstDeliveryScore float64 + + // P3 + MeshDeliveryDecayEpochs time.Duration + MeshDeliveryDampeningFactor float64 + MeshDeliveryCapFactor float64 + MeshDeliveryActivationTime time.Duration + + // P4 + InvalidMessageDecayEpochs time.Duration + MaxInvalidMessagesAllowed int } // Options is the struct used for creating topic score params @@ -71,15 +93,54 @@ type Options struct { } func (o *Options) defaults() { + // Network if o.Network.OneEpochDuration == 0 { o.Network.OneEpochDuration = oneEpochDuration } if o.Network.TotalTopicsWeight == 0 { - o.Network.TotalTopicsWeight = 
subnetTopicsWeight // + ... + o.Network.TotalTopicsWeight = totalTopicsWeight } + // Topic if o.Topic.D == 0 { o.Topic.D = gossipSubD } + // Topic - P1 + if o.Topic.MaxTimeInMeshScore == 0 { + o.Topic.MaxTimeInMeshScore = maxTimeInMeshScore + } + if o.Topic.TimeInMeshQuantum == 0 { + o.Topic.TimeInMeshQuantum = timeInMeshQuantum + } + if o.Topic.TimeInMeshQuantumCap == 0 { + o.Topic.TimeInMeshQuantumCap = timeInMeshQuantumCap + } + // Topic - P2 + if o.Topic.FirstDeliveryDecayEpochs == 0 { + o.Topic.FirstDeliveryDecayEpochs = firstDeliveryDecayEpochs + } + if o.Topic.MaxFirstDeliveryScore == 0 { + o.Topic.MaxFirstDeliveryScore = maxFirstDeliveryScore + } + // Topic - P3 + if o.Topic.MeshDeliveryDecayEpochs == 0 { + o.Topic.MeshDeliveryDecayEpochs = meshDeliveryDecayEpochs + } + if o.Topic.MeshDeliveryDampeningFactor == 0 { + o.Topic.MeshDeliveryDampeningFactor = meshDeliveryDampeningFactor + } + if o.Topic.MeshDeliveryCapFactor == 0 { + o.Topic.MeshDeliveryCapFactor = meshDeliveryCapFactor + } + if o.Topic.MeshDeliveryActivationTime == 0 { + o.Topic.MeshDeliveryActivationTime = o.Network.OneEpochDuration * 3 + } + // Topic - P4 + if o.Topic.InvalidMessageDecayEpochs == 0 { + o.Topic.InvalidMessageDecayEpochs = invalidMessageDecayEpochs + } + if o.Topic.MaxInvalidMessagesAllowed == 0 { + o.Topic.MaxInvalidMessagesAllowed = maxInvalidMessagesAllowed + } } func (o *Options) validate() error { @@ -91,10 +152,10 @@ func (o *Options) validate() error { // maxScore attainable by a peer func (o *Options) maxScore() float64 { - return (maxInMeshScore + maxFirstDeliveryScore) * o.Network.TotalTopicsWeight + return (o.Topic.MaxTimeInMeshScore + o.Topic.MaxFirstDeliveryScore) * o.Network.TotalTopicsWeight } -// NewOpts creates new TopicOpts instance with defaults +// NewOpts creates new TopicOpts instance func NewOpts(activeValidators, subnets int) Options { return Options{ Network: NetworkOpts{ @@ -107,70 +168,91 @@ func NewOpts(activeValidators, subnets int) Options { // 
NewSubnetTopicOpts creates new TopicOpts for a subnet topic func NewSubnetTopicOpts(activeValidators, subnets int) Options { + + // Create options with default values opts := NewOpts(activeValidators, subnets) opts.defaults() - opts.Topic.TopicWeight = subnetTopicsWeight / float64(opts.Network.Subnets) + + // Set topic weight with equal weights + opts.Topic.TopicWeight = opts.Network.TotalTopicsWeight / float64(opts.Network.Subnets) + + // Set expected message rate based on stage metrics validatorsPerSubnet := float64(opts.Network.ActiveValidators) / float64(opts.Network.Subnets) - valMsgsPerEpoch := 9.0 - opts.Topic.ExpectedMsgRate = validatorsPerSubnet * valMsgsPerEpoch / float64(slotsPerEpoch) - opts.Topic.FirstMsgDecayTime = time.Duration(8) - opts.Topic.MeshMsgDecayTime = time.Duration(16) - opts.Topic.MeshMsgCapFactor = 16.0 // using a large factor until we have more accurate values - opts.Topic.MeshMsgActivationTime = opts.Network.OneEpochDuration + msgsPerValidatorPerSecond := 600.0 / 10000.0 + opts.Topic.ExpectedMsgRate = validatorsPerSubnet * msgsPerValidatorPerSecond + return opts } // TopicParams creates pubsub.TopicScoreParams from the given TopicOpts -// implementation is based on ETH2.0 and prysm as a reference, with alignments to ssv: +// implementation is based on ETH2.0, with alignments to ssv: // https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c func TopicParams(opts Options) (*pubsub.TopicScoreParams, error) { + // Validate options if err := opts.validate(); err != nil { return nil, err } + + // Set to default if not set opts.defaults() - oneSlot := opts.Network.OneEpochDuration / 32.0 - inMeshTime := oneSlot + expectedMessagesPerDecayInterval := opts.Topic.ExpectedMsgRate * decayInterval.Seconds() + + // P1 + timeInMeshCap := float64(opts.Topic.TimeInMeshQuantumCap) / float64(opts.Topic.TimeInMeshQuantum) + + // P2 + firstMessageDeliveriesDecay := scoreDecay(opts.Network.OneEpochDuration*opts.Topic.FirstDeliveryDecayEpochs, 
decayInterval) + firstMessageDeliveriesCap, err := decayConvergence(firstMessageDeliveriesDecay, 2*(expectedMessagesPerDecayInterval)/float64(opts.Topic.D)) + if err != nil { + return nil, errors.Wrap(err, "could not calculate decay convergence for first message delivery cap") + } + + // P3 + meshMessageDeliveriesDecay := scoreDecay(opts.Network.OneEpochDuration*opts.Topic.MeshDeliveryDecayEpochs, decayInterval) + meshMessageDeliveriesThreshold, err := decayThreshold(meshMessageDeliveriesDecay, (expectedMessagesPerDecayInterval * opts.Topic.MeshDeliveryDampeningFactor)) + if err != nil { + return nil, errors.Wrap(err, "could not calculate threshold for mesh message deliveries threshold") + } + var meshMessageDeliveriesWeight float64 + if meshScoringEnabled { + meshMessageDeliveriesWeight = -(opts.maxScore() / (opts.Topic.TopicWeight * math.Pow(meshMessageDeliveriesThreshold, 2))) + } + MeshMessageDeliveriesCap := meshMessageDeliveriesThreshold * opts.Topic.MeshDeliveryCapFactor + + // P4 + invalidMessageDeliveriesDecay := scoreDecay(opts.Topic.InvalidMessageDecayEpochs*opts.Network.OneEpochDuration, decayInterval) + invalidMessageDeliveriesWeight := graylistThreshold / (opts.Topic.TopicWeight * float64(opts.Topic.MaxInvalidMessagesAllowed) * float64(opts.Topic.MaxInvalidMessagesAllowed)) params := &pubsub.TopicScoreParams{ - TopicWeight: opts.Topic.TopicWeight, - TimeInMeshWeight: maxInMeshScore / inMeshCap(inMeshTime), - TimeInMeshQuantum: inMeshTime, - TimeInMeshCap: inMeshCap(inMeshTime), - } - - if opts.Topic.FirstMsgDecayTime > 0 { - params.FirstMessageDeliveriesDecay = scoreDecay(opts.Topic.FirstMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) - firstMsgDeliveryCap, err := decayConvergence(params.FirstMessageDeliveriesDecay, 2*opts.Topic.ExpectedMsgRate/float64(opts.Topic.D)) - if err != nil { - return nil, errors.Wrap(err, "could not calculate first msg delivery cap") - } - params.FirstMessageDeliveriesCap = firstMsgDeliveryCap - 
params.FirstMessageDeliveriesWeight = maxFirstDeliveryScore / firstMsgDeliveryCap - } - - if opts.Topic.MeshMsgDecayTime > 0 { - params.MeshMessageDeliveriesDecay = scoreDecay(opts.Topic.MeshMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) - // a peer must send us at least 1/50 of the regular messages in time, very conservative limit - meshMsgDeliveriesThreshold, err := decayThreshold(params.MeshMessageDeliveriesDecay, math.Min(2.0, opts.Topic.ExpectedMsgRate/dampeningFactor)) - if err != nil { - return nil, errors.Wrap(err, "could not calculate mesh message deliveries threshold") - } - params.MeshMessageDeliveriesThreshold = meshMsgDeliveriesThreshold - params.MeshMessageDeliveriesCap = opts.Topic.MeshMsgCapFactor * meshMsgDeliveriesThreshold - params.MeshMessageDeliveriesWeight = -scoreByWeight(opts.maxScore(), opts.Topic.TopicWeight, - math.Max(4.0, params.MeshMessageDeliveriesCap)) // used cap instead of threshold to reduce weight - params.MeshMessageDeliveriesActivation = opts.Topic.MeshMsgActivationTime - params.MeshMessageDeliveriesWindow = 2 * time.Second - params.MeshFailurePenaltyWeight = params.MeshMessageDeliveriesWeight - params.MeshFailurePenaltyDecay = params.MeshMessageDeliveriesDecay - } - - if opts.Topic.InvalidMsgDecayTime > 0 { - params.InvalidMessageDeliveriesWeight = -opts.maxScore() / opts.Topic.TopicWeight - params.InvalidMessageDeliveriesDecay = scoreDecay(opts.Topic.InvalidMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) - } else { - params.InvalidMessageDeliveriesDecay = 0.1 + // Topic-specific parameters + TopicWeight: opts.Topic.TopicWeight, + + // P1 + TimeInMeshQuantum: time.Duration(opts.Topic.TimeInMeshQuantum) * time.Second, + TimeInMeshCap: timeInMeshCap, + TimeInMeshWeight: opts.Topic.MaxTimeInMeshScore / timeInMeshCap, + + // P2 + FirstMessageDeliveriesDecay: firstMessageDeliveriesDecay, + FirstMessageDeliveriesCap: firstMessageDeliveriesCap, + FirstMessageDeliveriesWeight: 
opts.Topic.MaxFirstDeliveryScore / firstMessageDeliveriesCap, + + // P3 + MeshMessageDeliveriesDecay: meshMessageDeliveriesDecay, + MeshMessageDeliveriesThreshold: meshMessageDeliveriesThreshold, + MeshMessageDeliveriesWeight: meshMessageDeliveriesWeight, + MeshMessageDeliveriesCap: MeshMessageDeliveriesCap, + MeshMessageDeliveriesActivation: opts.Topic.MeshDeliveryActivationTime, + MeshMessageDeliveriesWindow: 2 * time.Second, + + // P3b + MeshFailurePenaltyDecay: meshMessageDeliveriesDecay, + MeshFailurePenaltyWeight: meshMessageDeliveriesWeight, + + // P4 + InvalidMessageDeliveriesDecay: invalidMessageDeliveriesDecay, + InvalidMessageDeliveriesWeight: invalidMessageDeliveriesWeight, } return params, nil diff --git a/network/topics/pubsub.go b/network/topics/pubsub.go index b4b67b4833..155b6968b4 100644 --- a/network/topics/pubsub.go +++ b/network/topics/pubsub.go @@ -26,7 +26,7 @@ const ( ) // the following are kept in vars to allow flexibility (e.g. in tests) -var ( +const ( // validationQueueSize is the size that we assign to the validation queue validationQueueSize = 512 // outboundQueueSize is the size that we assign to the outbound message queue @@ -34,32 +34,34 @@ var ( // validateThrottle is the amount of goroutines used for pubsub msg validation validateThrottle = 8192 // scoreInspectInterval is the interval for performing score inspect, which goes over all peers scores - scoreInspectInterval = time.Minute + defaultScoreInspectInterval = 5 * time.Minute // msgIDCacheTTL specifies how long a message ID will be remembered as seen, 6.4m (as ETH 2.0) msgIDCacheTTL = params.HeartbeatInterval * 550 ) -// PububConfig is the needed config to instantiate pubsub -type PububConfig struct { +// PubSubConfig is the needed config to instantiate pubsub +type PubSubConfig struct { Host host.Host TraceLog bool StaticPeers []peer.AddrInfo MsgHandler PubsubMessageHandler - // MsgValidatorFactory accepts the topic name and returns the corresponding msg validator + // 
MsgValidator accepts the topic name and returns the corresponding msg validator // in case we need different validators for specific topics, // this should be the place to map a validator to topic - MsgValidatorFactory func(string) MsgValidatorFunc - ScoreIndex peers.ScoreIndex - Scoring *ScoringConfig - MsgIDHandler MsgIDHandler - Discovery discovery.Discovery + MsgValidator messageValidator + ScoreIndex peers.ScoreIndex + Scoring *ScoringConfig + MsgIDHandler MsgIDHandler + Discovery discovery.Discovery ValidateThrottle int ValidationQueueSize int OutboundQueueSize int MsgIDCacheTTL time.Duration - GetValidatorStats network.GetValidatorStats + GetValidatorStats network.GetValidatorStats + ScoreInspector pubsub.ExtendedPeerScoreInspectFn + ScoreInspectorInterval time.Duration } // ScoringConfig is the configuration for peer scoring @@ -76,7 +78,7 @@ type PubsubBundle struct { Resolver MsgPeersResolver } -func (cfg *PububConfig) init() error { +func (cfg *PubSubConfig) init() error { if cfg.Host == nil { return errors.New("bad args: missing host") } @@ -96,14 +98,14 @@ func (cfg *PububConfig) init() error { } // initScoring initializes scoring config -func (cfg *PububConfig) initScoring() { +func (cfg *PubSubConfig) initScoring() { if cfg.Scoring == nil { cfg.Scoring = DefaultScoringConfig() } } -// NewPubsub creates a new pubsub router and the necessary components -func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubsub.PubSub, Controller, error) { +// NewPubSub creates a new pubsub router and the necessary components +func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig) (*pubsub.PubSub, Controller, error) { if err := cfg.init(); err != nil { return nil, nil, err } @@ -133,12 +135,23 @@ func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubs } var topicScoreFactory func(string) *pubsub.TopicScoreParams - if cfg.ScoreIndex != nil { + + inspector := cfg.ScoreInspector + inspectInterval := 
cfg.ScoreInspectorInterval + if cfg.ScoreIndex != nil || inspector != nil { cfg.initScoring() - inspector := scoreInspector(logger, cfg.ScoreIndex) - peerScoreParams := params.PeerScoreParams(cfg.Scoring.OneEpochDuration, cfg.MsgIDCacheTTL, cfg.Scoring.IPColocationWeight, 0, cfg.Scoring.IPWhilelist...) + + if inspector == nil { + inspector = scoreInspector(logger, cfg.ScoreIndex) + } + + if inspectInterval == 0 { + inspectInterval = defaultScoreInspectInterval + } + + peerScoreParams := params.PeerScoreParams(cfg.Scoring.OneEpochDuration, cfg.MsgIDCacheTTL, cfg.Scoring.IPWhilelist...) psOpts = append(psOpts, pubsub.WithPeerScore(peerScoreParams, params.PeerScoreThresholds()), - pubsub.WithPeerScoreInspect(inspector, scoreInspectInterval)) + pubsub.WithPeerScoreInspect(inspector, inspectInterval)) async.Interval(ctx, time.Hour, func() { // reset peer scores metric every hour because it has a label for peer ID which can grow infinitely metricPubsubPeerScoreInspect.Reset() @@ -169,7 +182,7 @@ func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubs return nil, nil, err } - ctrl := NewTopicsController(ctx, logger, cfg.MsgHandler, cfg.MsgValidatorFactory, sf, ps, topicScoreFactory) + ctrl := NewTopicsController(ctx, logger, cfg.MsgHandler, cfg.MsgValidator, sf, ps, topicScoreFactory) return ps, ctrl, nil } diff --git a/network/topics/scoring.go b/network/topics/scoring.go index ee0360364a..7dd0ac7064 100644 --- a/network/topics/scoring.go +++ b/network/topics/scoring.go @@ -1,6 +1,7 @@ package topics import ( + "math" "time" "github.com/bloxapp/ssv/logging/fields" @@ -27,20 +28,43 @@ func DefaultScoringConfig() *ScoringConfig { func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex) pubsub.ExtendedPeerScoreInspectFn { return func(scores map[peer.ID]*pubsub.PeerScoreSnapshot) { for pid, peerScores := range scores { - // scores := []*peers.NodeScore{ - // { - // Name: "PS_Score", - // Value: peerScores.Score, - // }, { - // Name: 
"PS_BehaviourPenalty", - // Value: peerScores.BehaviourPenalty, - // }, { - // Name: "PS_IPColocationFactor", - // Value: peerScores.IPColocationFactor, - // }, - //} - logger.Debug("peer scores", fields.PeerID(pid), - zap.Any("peerScores", peerScores)) + + //filter all topics that have InvalidMessageDeliveries > 0 + filtered := make(map[string]*pubsub.TopicScoreSnapshot) + var totalInvalidMessages float64 + var totalLowMeshDeliveries int + for topic, snapshot := range peerScores.Topics { + if snapshot.InvalidMessageDeliveries != 0 { + filtered[topic] = snapshot + } + if snapshot.InvalidMessageDeliveries > 0 { + totalInvalidMessages += math.Sqrt(snapshot.InvalidMessageDeliveries) + } + if snapshot.MeshMessageDeliveries < 107 { + totalLowMeshDeliveries++ + } + } + + fields := []zap.Field{ + fields.PeerID(pid), + fields.PeerScore(peerScores.Score), + zap.Any("invalid_messages", filtered), + zap.Float64("ip_colocation", peerScores.IPColocationFactor), + zap.Float64("behaviour_penalty", peerScores.BehaviourPenalty), + zap.Float64("app_specific_penalty", peerScores.AppSpecificScore), + zap.Float64("total_low_mesh_deliveries", float64(totalLowMeshDeliveries)), + zap.Float64("total_invalid_messages", totalInvalidMessages), + zap.Any("invalid_messages", filtered), + } + + // log if peer score is below threshold + if peerScores.Score < -1000 { + fields = append(fields, zap.Bool("low_score", true)) + } + + // log peer overall score and topics scores + logger.Debug("peer scores", fields...) + metricPubsubPeerScoreInspect.WithLabelValues(pid.String()).Set(peerScores.Score) // err := scoreIdx.Score(pid, scores...) 
// if err != nil { @@ -54,7 +78,7 @@ func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex) pubsub.Extend } // topicScoreParams factory for creating scoring params for topics -func topicScoreParams(logger *zap.Logger, cfg *PububConfig) func(string) *pubsub.TopicScoreParams { +func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig) func(string) *pubsub.TopicScoreParams { return func(t string) *pubsub.TopicScoreParams { totalValidators, activeValidators, myValidators, err := cfg.GetValidatorStats() if err != nil { diff --git a/networkconfig/config.go b/networkconfig/config.go index de65d48fe4..a69c40ad4d 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -14,6 +14,8 @@ import ( var SupportedConfigs = map[string]NetworkConfig{ Mainnet.Name: Mainnet, + Holesky.Name: Holesky, + HoleskyStage.Name: HoleskyStage, JatoV2Stage.Name: JatoV2Stage, JatoV2.Name: JatoV2, LocalTestnet.Name: LocalTestnet, @@ -28,14 +30,15 @@ func GetNetworkConfigByName(name string) (NetworkConfig, error) { } type NetworkConfig struct { - Name string - Beacon beacon.BeaconNetwork - Domain spectypes.DomainType - GenesisEpoch spec.Epoch - RegistrySyncOffset *big.Int - RegistryContractAddr string // TODO: ethcommon.Address - Bootnodes []string - WhitelistedOperatorKeys []string + Name string + Beacon beacon.BeaconNetwork + Domain spectypes.DomainType + GenesisEpoch spec.Epoch + RegistrySyncOffset *big.Int + RegistryContractAddr string // TODO: ethcommon.Address + Bootnodes []string + WhitelistedOperatorKeys []string + PermissionlessActivationEpoch spec.Epoch } func (n NetworkConfig) String() string { @@ -61,3 +64,8 @@ func (n NetworkConfig) SlotDurationSec() time.Duration { func (n NetworkConfig) SlotsPerEpoch() uint64 { return n.Beacon.SlotsPerEpoch() } + +// GetGenesisTime returns the genesis time in unix time. 
+func (n NetworkConfig) GetGenesisTime() time.Time { + return time.Unix(int64(n.Beacon.MinGenesisTime()), 0) +} diff --git a/networkconfig/holesky-stage.go b/networkconfig/holesky-stage.go new file mode 100644 index 0000000000..ed87c45179 --- /dev/null +++ b/networkconfig/holesky-stage.go @@ -0,0 +1,23 @@ +package networkconfig + +import ( + "math/big" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" +) + +var HoleskyStage = NetworkConfig{ + Name: "holesky-stage", + Beacon: beacon.NewNetwork(spectypes.HoleskyNetwork), + Domain: [4]byte{0x00, 0x00, 0x31, 0x12}, + GenesisEpoch: 1, + RegistrySyncOffset: new(big.Int).SetInt64(84599), + RegistryContractAddr: "0x0d33801785340072C452b994496B19f196b7eE15", + Bootnodes: []string{ + "enr:-Li4QNUN0RdeoHjI4Np18-PX1VXrJ2rJMo2OarRz0wCAxiYlD3s_E4zsmXi1LHv62ULLBT-AQfZIjYefEoEsMDkaEKCGAYtCguORh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhKfrtCyJc2VjcDI1NmsxoQP2e508AoA0B-KH-IaAd3nVCfI9q16lNztV-oTpcH72tIN0Y3CCE4mDdWRwgg-h", + }, + WhitelistedOperatorKeys: []string{}, + PermissionlessActivationEpoch: 10560, +} diff --git a/networkconfig/holesky.go b/networkconfig/holesky.go new file mode 100644 index 0000000000..ce08531948 --- /dev/null +++ b/networkconfig/holesky.go @@ -0,0 +1,23 @@ +package networkconfig + +import ( + "math/big" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" +) + +var Holesky = NetworkConfig{ + Name: "holesky", + Beacon: beacon.NewNetwork(spectypes.HoleskyNetwork), + Domain: spectypes.DomainType{0x0, 0x0, 0x5, 0x1}, + GenesisEpoch: 1, + RegistrySyncOffset: new(big.Int).SetInt64(181612), + RegistryContractAddr: "0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA", + Bootnodes: []string{ + 
"enr:-Li4QFIQzamdvTxGJhvcXG_DFmCeyggSffDnllY5DiU47pd_K_1MRnSaJimWtfKJ-MD46jUX9TwgW5Jqe0t4pH41RYWGAYuFnlyth2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhCLdu_SJc2VjcDI1NmsxoQN4v-N9zFYwEqzGPBBX37q24QPFvAVUtokIo1fblIsmTIN0Y3CCE4uDdWRwgg-j", + }, + WhitelistedOperatorKeys: []string{}, + PermissionlessActivationEpoch: 13950, // Nov-29-2023 12:00:00 PM UTC +} diff --git a/networkconfig/jato-v2.go b/networkconfig/jato-v2.go index 36f2775b61..f329699c54 100644 --- a/networkconfig/jato-v2.go +++ b/networkconfig/jato-v2.go @@ -33,4 +33,5 @@ var JatoV2 = NetworkConfig{ "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNmkwelNHRzFiaHlPZU8xVDVxc2UKOFpHbElBQ2pmemVYQzhpYVVReGVCb0dlVGRvN0tqalkwNy80b3hBNkhjdG45bEtxd1BodG5ISXIvZ1RlWXNYUwp5QVhPL1Q5K2RQcng1ZEp3SEVCdm5BcmNSQkNzaGF5Sng2S0xiZ3RJb2dGSWhkK1ptaFpiWFpWZVp5THhzK2tZCnM4djVwcHBIbWNwWHRwUVAxWm1ycndpTC9hZU5JNzczbUlrZ1pBOGdNK2Z5S2RtTGJrQXdXZWh1SXZKRmpuVCsKQlVkUHUzWGJIemU2SlJnY2NYNmZnM1gwOTJibG9VMzRxY1VIelNhWU9TZlc2TUpEbFgzQzJCeFhCZ042VFV0aQpDN2k2ZE9qaW14RzlSMkp4ZHVhZGpUeEM1MHl5OE9IVWpMVGNkc2pWRjdYNXdGUzFqaDI5aFpDY0FoeDB2NDg3CjdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBNldITnNBdTdSYnMxM0I2c0taWXgKVnZuMldlTy9YMTdSeUx1MjA0K2VtbjkvSGhIRlhXT29CMGczekNZQWp2WWdsbFJka0laTWt3ZkFUNGZvVjVTKwpvNzFFQ1dFN1ZuaytxcWd0U3k5M0ZTTVJzUG9vNngrTUd4ZURBQ3RQbDdQV1EyTXJmV1hkNzVwV1p5TVd5VndHCktPbFo0RHhoQ0VOcXlRcndlOTkybU9wVDZBcTJ1TmVsUmdESUJDSW1CV01NcUl2aXdhSU96MlBmTWR1L3ZVTWgKcVFuNGJJZjFpcVk2WGlKU1g2bDJvUWlTb09VMjRvNkFCdHlHbzRpTDJXN2tOajVUa1hOOEVzeGc3WmUveVQ0YgpKNGtvVjdmNUE3dmpMbHc1ZkdjWDR1bTBNK1QwbnczUlVIY3pHK1E3U1VGMTFGU3c0VnM1WVBHWC84a2tzdXgyCkx3SURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, + PermissionlessActivationEpoch: 220707, // Nov-29-2023 12:04:48 PM UTC } diff --git a/networkconfig/mainnet.go b/networkconfig/mainnet.go index 2326420cde..4f977f917d 100644 --- 
a/networkconfig/mainnet.go +++ b/networkconfig/mainnet.go @@ -40,4 +40,5 @@ var Mainnet = NetworkConfig{ "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcU5Sd0xWSHNWMEloUjdjdUJkb1AKZnVwNTkydEJFSG0vRllDREZQbERMR2NVZ2NzZ29PdHBsV2hMRjBGSzIwQ3ppVi83WVZzcWpxcDh3VDExM3pBbQoxOTZZRlN6WmUzTFhOQXFRWlBwbDlpOVJxdVJJMGlBT2xiWUp0ampJRjd2ZVZLbVdybzMwWTZDV3JPcHpVQ1BPClRGVEpGZ0hvZmtQT2pabmprNURtdDg2ZURveUxzenJQZWQ0LzlyR2NNVUp4WnJBSjEvbFR1ajNaWWVJUk0wS04KUVQ0eitPb3p0T0dBeDVVcUk2THpQL3NGOWRJM3BzM3BIb3dXOWF2RHp3Qm94Y3hWam14NWhRMXowOTN4MnlkYgpWcjgxNDgzTzdqUkt6eFpXeEduOFJzZUROZkxwSi93VFJiQ0lVOFhwUC9IKzd6TWNGMG1HbVlUcjAvcWR1bVNsCjNRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdmRWVVJ0OFgxbFA5VDVSUUdYdVkKcFpZWjVBb3VuSEdUakMvQ1FoTmQ5RC9kT2kvSDUwVW1PdVBpTzhYYUF4UFRGcGIrZ2xCeGJRRHVQUGN1cENPdQpKN09lVTBvdzdsQjVMclZlWWt3RExnSHY3bDQwcjRWVTM3NlFueGhuS0JyVHNkaWdmZHJYUWZveGRhajVQQ0VYCnFjK1ozNXFPUmpCZ3dublRlbEJjc2NLMHorSkJaQzU0OXFOWThMbm9aMTBuRFptdW1YVDlac3dISCtJVkZacDYKMEZTY0k0V1V5U1gxVnJJT2tSandoSWlCSFk3YkhrZ01Bci9xeStuRmlFUUVRV2Q2VXAwOWtkS0hNVmdtVFp4KwprQXZRbFZ0Z3luYkFPWkNMeng0Ymo1Yi9MQklIejNiTk9zWlNtR3AxWi9hWDFkd1BaMlhOai83elovNGpuM095CkdRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", }, + PermissionlessActivationEpoch: 249056, // Dec-13-2023 09:58:47 AM UTC } diff --git a/networkconfig/test-network.go b/networkconfig/test-network.go index a0e58f2819..0b8d4ca067 100644 --- a/networkconfig/test-network.go +++ b/networkconfig/test-network.go @@ -18,4 +18,5 @@ var TestNetwork = NetworkConfig{ Bootnodes: []string{ 
"enr:-Li4QO86ZMZr_INMW_WQBsP2jS56yjrHnZXxAUOKJz4_qFPKD1Cr3rghQD2FtXPk2_VPnJUi8BBiMngOGVXC0wTYpJGGAYgqnGSNh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhArqAsGJc2VjcDI1NmsxoQKNW0Mf-xTXcevRSkZOvoN0Q0T9OkTjGZQyQeOl3bYU3YN0Y3CCE4iDdWRwgg-g;enr:-Li4QBoH15fXLV78y1_nmD5sODveptALORh568iWLS_eju3SUvF2ZfGE2j-nERKU1zb2g5KlS8L70SRLdRUJ-pHH-fmGAYgvh9oGh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhArqAsGJc2VjcDI1NmsxoQO_tV3JP75ZUZPjhOgc2VqEu_FQEMeHc4AyOz6Lz33M2IN0Y3CCE4mDdWRwgg-h", }, + PermissionlessActivationEpoch: 123456789, } diff --git a/operator/duties/attester.go b/operator/duties/attester.go index 6af6f4abd1..f89cbaf867 100644 --- a/operator/duties/attester.go +++ b/operator/duties/attester.go @@ -11,19 +11,20 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) type AttesterHandler struct { baseHandler - duties *Duties[*eth2apiv1.AttesterDuty] + duties *dutystore.Duties[eth2apiv1.AttesterDuty] fetchCurrentEpoch bool fetchNextEpoch bool } -func NewAttesterHandler() *AttesterHandler { +func NewAttesterHandler(duties *dutystore.Duties[eth2apiv1.AttesterDuty]) *AttesterHandler { h := &AttesterHandler{ - duties: NewDuties[*eth2apiv1.AttesterDuty](), + duties: duties, } h.fetchCurrentEpoch = true h.fetchFirst = true @@ -52,7 +53,7 @@ func (h *AttesterHandler) Name() string { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current epoch. +// 2. ResetEpoch duties for the current epoch. // 3. Fetch duties for the current epoch. // 4. If necessary, fetch duties for the next epoch. 
// @@ -69,7 +70,8 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_seq", buildStr)) @@ -82,7 +84,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { } else { h.processExecution(currentEpoch, slot) if h.indicesChanged { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.indicesChanged = false } h.processFetching(ctx, currentEpoch, slot) @@ -98,7 +100,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // last slot of epoch if uint64(slot)%slotsPerEpoch == slotsPerEpoch-1 { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) } case reorgEvent := <-h.reorg: @@ -108,18 +110,18 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // reset current epoch duties if reorgEvent.Previous { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.fetchFirst = true h.fetchCurrentEpoch = true if h.shouldFetchNexEpoch(reorgEvent.Slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } else if reorgEvent.Current { // reset & re-fetch next epoch duties if in appropriate slot range, // otherwise they will be fetched by the appropriate slot tick. 
if h.shouldFetchNexEpoch(reorgEvent.Slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } @@ -135,7 +137,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // reset next epoch duties if in appropriate slot range if h.shouldFetchNexEpoch(slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } @@ -164,24 +166,26 @@ func (h *AttesterHandler) processFetching(ctx context.Context, epoch phase0.Epoc } func (h *AttesterHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) { + duties := h.duties.CommitteeSlotDuties(epoch, slot) + if duties == nil { + return + } + // range over duties and execute - if slotMap, ok := h.duties.m[epoch]; ok { - if duties, ok := slotMap[slot]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)*2) - for _, d := range duties { - if h.shouldExecute(d) { - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester)) - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator)) - } - } - h.executeDuties(h.logger, toExecute) + toExecute := make([]*spectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d) { + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester)) + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator)) } } + + h.executeDuties(h.logger, toExecute) } func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error { start := time.Now() - indices := h.validatorController.ActiveValidatorIndices(epoch) + indices := h.validatorController.CommitteeActiveIndices(epoch) if len(indices) == 0 { return nil @@ -194,7 +198,7 @@ func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase specDuties := make([]*spectypes.Duty, 0, len(duties)) for _, d := range duties { - h.duties.Add(epoch, d.Slot, d) + h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, true) 
specDuties = append(specDuties, h.toSpecDuty(d, spectypes.BNRoleAttester)) } @@ -245,8 +249,7 @@ func (h *AttesterHandler) shouldExecute(duty *eth2apiv1.AttesterDuty) bool { return true } if currentSlot+1 == duty.Slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/attester_test.go b/operator/duties/attester_test.go index e0927c1f0a..007e018e68 100644 --- a/operator/duties/attester_test.go +++ b/operator/duties/attester_test.go @@ -5,50 +5,52 @@ import ( "testing" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" ) -func setupAttesterDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*v1.AttesterDuty]) (chan struct{}, chan []*spectypes.Duty) { +func setupAttesterDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.AttesterDuty]) (chan struct{}, chan []*spectypes.Duty) { fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.Duty) s.beaconNode.(*mocks.MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.AttesterDuty, error) { + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.AttesterDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) return duties, nil }).AnyTimes() - 
s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getIndices := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - duties, _ := dutiesMap.Get(epoch) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return fetchDutiesCall, executeDutiesCall } -func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*v1.AttesterDuty) []*spectypes.Duty { +func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*eth2apiv1.AttesterDuty) []*spectypes.Duty { expectedDuties := make([]*spectypes.Duty, 0) for _, d := range duties { expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleAttester)) @@ -59,15 +61,16 @@ func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*v1.Attes func TestScheduler_Attester_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = 
NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(1), @@ -95,15 +98,16 @@ func TestScheduler_Attester_Same_Slot(t *testing.T) { func TestScheduler_Attester_Diff_Slots(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -136,13 +140,14 @@ func TestScheduler_Attester_Diff_Slots(t *testing.T) { func TestScheduler_Attester_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = 
hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() // STEP 1: wait for no action to be taken mockTicker.Send(currentSlot.GetSlot()) @@ -152,7 +157,7 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { scheduler.indicesChg <- struct{}{} // no execution should happen in slot 0 waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(0), @@ -180,7 +185,7 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { // STEP 4: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(2)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[2]}) + expected := expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -193,13 +198,14 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool 
:= setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() // STEP 1: wait for no action to be taken mockTicker.Send(currentSlot.GetSlot()) @@ -213,7 +219,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 3: trigger a change in active indices scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(3), ValidatorIndex: phase0.ValidatorIndex(1), @@ -223,7 +229,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 4: trigger a change in active indices in the same slot scheduler.indicesChg <- struct{}{} duties, _ = dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(4), ValidatorIndex: phase0.ValidatorIndex(2), @@ -238,7 +244,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 6: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[0]}) + expected := expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -247,7 +253,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 7: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(4)) duties, _ = 
dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[1]}) + expected = expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -261,15 +267,16 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // reorg previous dependent root changed func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(63)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(66), @@ -282,8 +289,8 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, PreviousDutyDependentRoot: phase0.Root{0x01}, @@ -298,13 +305,13 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg on epoch transition - e = &v1.Event{ - Data: 
&v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(67), @@ -341,15 +348,16 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { // reorg previous dependent root changed and the indices changed as well func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(63)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(66), @@ -363,8 +371,8 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, PreviousDutyDependentRoot: phase0.Root{0x01}, @@ -379,13 +387,13 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: 
trigger reorg on epoch transition - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(67), @@ -398,7 +406,7 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t // STEP 5: trigger indices change scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(2)) - dutiesMap.Set(phase0.Epoch(2), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(67), ValidatorIndex: phase0.ValidatorIndex(2), @@ -432,15 +440,16 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t // reorg previous dependent root changed func TestScheduler_Attester_Reorg_Previous(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(32)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(35), @@ -453,8 +462,8 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, 
timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x01}, }, @@ -468,13 +477,13 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -511,15 +520,16 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { // reorg previous dependent root changed and the indices changed the same slot func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(32)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(35), @@ -532,8 +542,8 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - 
e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x01}, }, @@ -547,13 +557,13 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -566,7 +576,7 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T // STEP 5: trigger indices change scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(1)) - dutiesMap.Set(phase0.Epoch(1), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(36), ValidatorIndex: phase0.ValidatorIndex(2), @@ -600,15 +610,16 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T // reorg current dependent root changed func TestScheduler_Attester_Reorg_Current(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(47)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + 
startFn() - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(64), @@ -621,8 +632,8 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -636,13 +647,13 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(65), @@ -687,15 +698,16 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { // reorg current dependent root changed including indices change in the same slot func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(47)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + 
dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(64), @@ -708,8 +720,8 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -723,13 +735,13 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(65), @@ -742,7 +754,7 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 5: trigger indices change scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(2)) - dutiesMap.Set(phase0.Epoch(2), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), append(duties, ð2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(65), ValidatorIndex: phase0.ValidatorIndex(2), @@ -783,15 +795,16 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { func TestScheduler_Attester_Early_Block(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := 
setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -817,8 +830,8 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) // STEP 4: trigger head event (block arrival) - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := ð2apiv1.Event{ + Data: ð2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), }, } @@ -833,15 +846,16 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(31)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(32), @@ -869,15 +883,16 @@ func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) { func TestScheduler_Attester_Fetch_Execute_Next_Epoch_Duty(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = 
NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(13)) - scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, mockTicker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(32), diff --git a/operator/duties/base_handler.go b/operator/duties/base_handler.go index 15303fef68..f75e6a99fb 100644 --- a/operator/duties/base_handler.go +++ b/operator/duties/base_handler.go @@ -3,11 +3,11 @@ package duties import ( "context" - "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "go.uber.org/zap" "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/slotticker" ) //go:generate mockgen -package=duties -destination=./base_handler_mock.go -source=./base_handler.go @@ -16,8 +16,9 @@ import ( type ExecuteDutiesFunc func(logger *zap.Logger, duties []*spectypes.Duty) type dutyHandler interface { - Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, chan phase0.Slot, chan ReorgEvent, chan struct{}) + Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, slotticker.Provider, chan ReorgEvent, chan struct{}) HandleDuties(context.Context) + HandleInitialDuties(context.Context) Name() string } @@ -27,7 +28,7 @@ type baseHandler struct { network networkconfig.NetworkConfig validatorController ValidatorController executeDuties 
ExecuteDutiesFunc - ticker chan phase0.Slot + ticker slotticker.SlotTicker reorg chan ReorgEvent indicesChange chan struct{} @@ -43,7 +44,7 @@ func (h *baseHandler) Setup( network networkconfig.NetworkConfig, validatorController ValidatorController, executeDuties ExecuteDutiesFunc, - ticker chan phase0.Slot, + slotTickerProvider slotticker.Provider, reorgEvents chan ReorgEvent, indicesChange chan struct{}, ) { @@ -52,28 +53,16 @@ func (h *baseHandler) Setup( h.network = network h.validatorController = validatorController h.executeDuties = executeDuties - h.ticker = ticker + h.ticker = slotTickerProvider() h.reorg = reorgEvents h.indicesChange = indicesChange } -type Duties[D any] struct { - m map[phase0.Epoch]map[phase0.Slot][]D +func (h *baseHandler) warnMisalignedSlotAndDuty(dutyType string) { + h.logger.Debug("current slot and duty slot are not aligned, "+ + "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", dutyType)) } -func NewDuties[D any]() *Duties[D] { - return &Duties[D]{ - m: make(map[phase0.Epoch]map[phase0.Slot][]D), - } -} - -func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, duty D) { - if _, ok := d.m[epoch]; !ok { - d.m[epoch] = make(map[phase0.Slot][]D) - } - d.m[epoch][slot] = append(d.m[epoch][slot], duty) -} - -func (d *Duties[D]) Reset(epoch phase0.Epoch) { - delete(d.m, epoch) +func (b *baseHandler) HandleInitialDuties(context.Context) { + // Do nothing } diff --git a/operator/duties/base_handler_mock.go b/operator/duties/base_handler_mock.go index 801ca2dc8c..4181282362 100644 --- a/operator/duties/base_handler_mock.go +++ b/operator/duties/base_handler_mock.go @@ -8,8 +8,8 @@ import ( context "context" reflect "reflect" - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" networkconfig "github.com/bloxapp/ssv/networkconfig" + slotticker "github.com/bloxapp/ssv/operator/slotticker" gomock "github.com/golang/mock/gomock" zap "go.uber.org/zap" ) @@ -49,6 +49,18 @@ func (mr 
*MockdutyHandlerMockRecorder) HandleDuties(arg0 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleDuties", reflect.TypeOf((*MockdutyHandler)(nil).HandleDuties), arg0) } +// HandleInitialDuties mocks base method. +func (m *MockdutyHandler) HandleInitialDuties(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "HandleInitialDuties", arg0) +} + +// HandleInitialDuties indicates an expected call of HandleInitialDuties. +func (mr *MockdutyHandlerMockRecorder) HandleInitialDuties(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleInitialDuties", reflect.TypeOf((*MockdutyHandler)(nil).HandleInitialDuties), arg0) +} + // Name mocks base method. func (m *MockdutyHandler) Name() string { m.ctrl.T.Helper() @@ -64,7 +76,7 @@ func (mr *MockdutyHandlerMockRecorder) Name() *gomock.Call { } // Setup mocks base method. -func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 networkconfig.NetworkConfig, arg4 ValidatorController, arg5 ExecuteDutiesFunc, arg6 chan phase0.Slot, arg7 chan ReorgEvent, arg8 chan struct{}) { +func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 networkconfig.NetworkConfig, arg4 ValidatorController, arg5 ExecuteDutiesFunc, arg6 slotticker.Provider, arg7 chan ReorgEvent, arg8 chan struct{}) { m.ctrl.T.Helper() m.ctrl.Call(m, "Setup", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } diff --git a/operator/duties/dutystore/duties.go b/operator/duties/dutystore/duties.go new file mode 100644 index 0000000000..50fd0d7e22 --- /dev/null +++ b/operator/duties/dutystore/duties.go @@ -0,0 +1,97 @@ +package dutystore + +import ( + "sync" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +type Duty interface { + eth2apiv1.AttesterDuty | eth2apiv1.ProposerDuty | eth2apiv1.SyncCommitteeDuty +} + +type dutyDescriptor[D 
Duty] struct { + duty *D + inCommittee bool +} + +type Duties[D Duty] struct { + mu sync.RWMutex + m map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D] +} + +func NewDuties[D Duty]() *Duties[D] { + return &Duties[D]{ + m: make(map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]), + } +} + +func (d *Duties[D]) CommitteeSlotDuties(epoch phase0.Epoch, slot phase0.Slot) []*D { + d.mu.RLock() + defer d.mu.RUnlock() + + slotMap, ok := d.m[epoch] + if !ok { + return nil + } + + descriptorMap, ok := slotMap[slot] + if !ok { + return nil + } + + var duties []*D + for _, descriptor := range descriptorMap { + if descriptor.inCommittee { + duties = append(duties, descriptor.duty) + } + } + + return duties +} + +func (d *Duties[D]) ValidatorDuty(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex) *D { + d.mu.RLock() + defer d.mu.RUnlock() + + slotMap, ok := d.m[epoch] + if !ok { + return nil + } + + descriptorMap, ok := slotMap[slot] + if !ok { + return nil + } + + descriptor, ok := descriptorMap[validatorIndex] + if !ok { + return nil + } + + return descriptor.duty +} + +func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex, duty *D, inCommittee bool) { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.m[epoch]; !ok { + d.m[epoch] = make(map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]) + } + if _, ok := d.m[epoch][slot]; !ok { + d.m[epoch][slot] = make(map[phase0.ValidatorIndex]dutyDescriptor[D]) + } + d.m[epoch][slot][validatorIndex] = dutyDescriptor[D]{ + duty: duty, + inCommittee: inCommittee, + } +} + +func (d *Duties[D]) ResetEpoch(epoch phase0.Epoch) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.m, epoch) +} diff --git a/operator/duties/dutystore/store.go b/operator/duties/dutystore/store.go new file mode 100644 index 0000000000..53dbfaefcc --- /dev/null +++ b/operator/duties/dutystore/store.go @@ -0,0 +1,19 @@ +package dutystore + 
+import ( + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" +) + +type Store struct { + Attester *Duties[eth2apiv1.AttesterDuty] + Proposer *Duties[eth2apiv1.ProposerDuty] + SyncCommittee *SyncCommitteeDuties +} + +func New() *Store { + return &Store{ + Attester: NewDuties[eth2apiv1.AttesterDuty](), + Proposer: NewDuties[eth2apiv1.ProposerDuty](), + SyncCommittee: NewSyncCommitteeDuties(), + } +} diff --git a/operator/duties/dutystore/sync_committee.go b/operator/duties/dutystore/sync_committee.go new file mode 100644 index 0000000000..0ae13041c7 --- /dev/null +++ b/operator/duties/dutystore/sync_committee.go @@ -0,0 +1,76 @@ +package dutystore + +import ( + "sync" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +type SyncCommitteeDuties struct { + mu sync.RWMutex + m map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty] +} + +func NewSyncCommitteeDuties() *SyncCommitteeDuties { + return &SyncCommitteeDuties{ + m: make(map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]), + } +} + +func (d *SyncCommitteeDuties) CommitteePeriodDuties(period uint64) []*eth2apiv1.SyncCommitteeDuty { + d.mu.RLock() + defer d.mu.RUnlock() + + descriptorMap, ok := d.m[period] + if !ok { + return nil + } + + var duties []*eth2apiv1.SyncCommitteeDuty + for _, descriptor := range descriptorMap { + if descriptor.inCommittee { + duties = append(duties, descriptor.duty) + } + } + + return duties +} + +func (d *SyncCommitteeDuties) Duty(period uint64, validatorIndex phase0.ValidatorIndex) *eth2apiv1.SyncCommitteeDuty { + d.mu.RLock() + defer d.mu.RUnlock() + + duties, ok := d.m[period] + if !ok { + return nil + } + + descriptor, ok := duties[validatorIndex] + if !ok { + return nil + } + + return descriptor.duty +} + +func (d *SyncCommitteeDuties) Add(period uint64, validatorIndex phase0.ValidatorIndex, duty *eth2apiv1.SyncCommitteeDuty, inCommittee bool) { + 
d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.m[period]; !ok { + d.m[period] = make(map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]) + } + + d.m[period][validatorIndex] = dutyDescriptor[eth2apiv1.SyncCommitteeDuty]{ + duty: duty, + inCommittee: inCommittee, + } +} + +func (d *SyncCommitteeDuties) Reset(period uint64) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.m, period) +} diff --git a/operator/duties/mocks/scheduler.go b/operator/duties/mocks/scheduler.go index 00cd929622..7195d58dcd 100644 --- a/operator/duties/mocks/scheduler.go +++ b/operator/duties/mocks/scheduler.go @@ -7,13 +7,13 @@ package mocks import ( context "context" reflect "reflect" + time "time" client "github.com/attestantio/go-eth2-client" v1 "github.com/attestantio/go-eth2-client/api/v1" phase0 "github.com/attestantio/go-eth2-client/spec/phase0" types "github.com/bloxapp/ssv/protocol/v2/types" gomock "github.com/golang/mock/gomock" - event "github.com/prysmaticlabs/prysm/v4/async/event" ) // MockSlotTicker is a mock of SlotTicker interface. @@ -39,18 +39,32 @@ func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { return m.recorder } -// Subscribe mocks base method. -func (m *MockSlotTicker) Subscribe(subscription chan phase0.Slot) event.Subscription { +// Next mocks base method. +func (m *MockSlotTicker) Next() <-chan time.Time { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subscribe", subscription) - ret0, _ := ret[0].(event.Subscription) + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(<-chan time.Time) return ret0 } -// Subscribe indicates an expected call of Subscribe. -func (mr *MockSlotTickerMockRecorder) Subscribe(subscription interface{}) *gomock.Call { +// Next indicates an expected call of Next. 
+func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockSlotTicker)(nil).Subscribe), subscription) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) +} + +// Slot mocks base method. +func (m *MockSlotTicker) Slot() phase0.Slot { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Slot") + ret0, _ := ret[0].(phase0.Slot) + return ret0 +} + +// Slot indicates an expected call of Slot. +func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) } // MockBeaconNode is a mock of BeaconNode interface. @@ -186,18 +200,32 @@ func (m *MockValidatorController) EXPECT() *MockValidatorControllerMockRecorder return m.recorder } -// ActiveValidatorIndices mocks base method. -func (m *MockValidatorController) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { +// AllActiveIndices mocks base method. +func (m *MockValidatorController) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// AllActiveIndices indicates an expected call of AllActiveIndices. +func (mr *MockValidatorControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).AllActiveIndices), epoch) +} + +// CommitteeActiveIndices mocks base method. 
+func (m *MockValidatorController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveValidatorIndices", epoch) + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) ret0, _ := ret[0].([]phase0.ValidatorIndex) return ret0 } -// ActiveValidatorIndices indicates an expected call of ActiveValidatorIndices. -func (mr *MockValidatorControllerMockRecorder) ActiveValidatorIndices(epoch interface{}) *gomock.Call { +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. +func (mr *MockValidatorControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveValidatorIndices", reflect.TypeOf((*MockValidatorController)(nil).ActiveValidatorIndices), epoch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).CommitteeActiveIndices), epoch) } // GetOperatorShares mocks base method. diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go index 60fde29186..31c4aa50bb 100644 --- a/operator/duties/proposer.go +++ b/operator/duties/proposer.go @@ -11,17 +11,18 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) type ProposerHandler struct { baseHandler - duties *Duties[*eth2apiv1.ProposerDuty] + duties *dutystore.Duties[eth2apiv1.ProposerDuty] } -func NewProposerHandler() *ProposerHandler { +func NewProposerHandler(duties *dutystore.Duties[eth2apiv1.ProposerDuty]) *ProposerHandler { return &ProposerHandler{ - duties: NewDuties[*eth2apiv1.ProposerDuty](), + duties: duties, baseHandler: baseHandler{ fetchFirst: true, }, @@ -44,7 +45,7 @@ func (h *ProposerHandler) Name() string { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current epoch. +// 2. ResetEpoch duties for the current epoch. // 3. 
Fetch duties for the current epoch. // // On Ticker event: @@ -58,7 +59,8 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_seq", buildStr)) @@ -71,7 +73,6 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { } else { h.processExecution(currentEpoch, slot) if h.indicesChanged { - h.duties.Reset(currentEpoch) h.indicesChanged = false h.processFetching(ctx, currentEpoch, slot) } @@ -79,7 +80,7 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { // last slot of epoch if uint64(slot)%h.network.Beacon.SlotsPerEpoch() == h.network.Beacon.SlotsPerEpoch()-1 { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch - 1) h.fetchFirst = true } @@ -90,7 +91,7 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { // reset current epoch duties if reorgEvent.Current { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.fetchFirst = true } @@ -105,6 +106,12 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { } } +func (h *ProposerHandler) HandleInitialDuties(ctx context.Context) { + slot := h.network.Beacon.EstimatedCurrentSlot() + epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) + h.processFetching(ctx, epoch, slot) +} + func (h *ProposerHandler) processFetching(ctx context.Context, epoch phase0.Epoch, slot phase0.Slot) { ctx, cancel := context.WithDeadline(ctx, h.network.Beacon.GetSlotStartTime(slot+1).Add(100*time.Millisecond)) defer cancel() @@ -116,36 +123,46 @@ func (h *ProposerHandler) processFetching(ctx context.Context, epoch phase0.Epoc } func (h *ProposerHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) { + duties := h.duties.CommitteeSlotDuties(epoch, slot) + if duties == nil 
{ + return + } + // range over duties and execute - if slotMap, ok := h.duties.m[epoch]; ok { - if duties, ok := slotMap[slot]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)) - for _, d := range duties { - if h.shouldExecute(d) { - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer)) - } - } - h.executeDuties(h.logger, toExecute) + toExecute := make([]*spectypes.Duty, 0, len(duties)) + for _, d := range duties { + if h.shouldExecute(d) { + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer)) } } + h.executeDuties(h.logger, toExecute) } func (h *ProposerHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error { start := time.Now() - indices := h.validatorController.ActiveValidatorIndices(epoch) - if len(indices) == 0 { + allIndices := h.validatorController.AllActiveIndices(epoch) + if len(allIndices) == 0 { return nil } - duties, err := h.beaconNode.ProposerDuties(ctx, epoch, indices) + inCommitteeIndices := h.validatorController.CommitteeActiveIndices(epoch) + inCommitteeIndicesSet := map[phase0.ValidatorIndex]struct{}{} + for _, idx := range inCommitteeIndices { + inCommitteeIndicesSet[idx] = struct{}{} + } + + duties, err := h.beaconNode.ProposerDuties(ctx, epoch, allIndices) if err != nil { return fmt.Errorf("failed to fetch proposer duties: %w", err) } + h.duties.ResetEpoch(epoch) + specDuties := make([]*spectypes.Duty, 0, len(duties)) for _, d := range duties { - h.duties.Add(epoch, d.Slot, d) + _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] + h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, inCommitteeDuty) specDuties = append(specDuties, h.toSpecDuty(d, spectypes.BNRoleProposer)) } @@ -174,8 +191,7 @@ func (h *ProposerHandler) shouldExecute(duty *eth2apiv1.ProposerDuty) bool { return true } if currentSlot+1 == duty.Slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", 
zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/proposer_test.go b/operator/duties/proposer_test.go index 8df730b6d3..45d13f1454 100644 --- a/operator/duties/proposer_test.go +++ b/operator/duties/proposer_test.go @@ -4,48 +4,50 @@ import ( "context" "testing" - v1 "github.com/attestantio/go-eth2-client/api/v1" + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" ) -func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*v1.ProposerDuty]) (chan struct{}, chan []*spectypes.Duty) { +func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.ProposerDuty]) (chan struct{}, chan []*spectypes.Duty) { fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.Duty) s.beaconNode.(*mocks.MockBeaconNode).EXPECT().ProposerDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.ProposerDuty, error) { + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.ProposerDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getIndices := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - duties, _ := dutiesMap.Get(epoch) - for _, d := range duties { - 
uniqueIndices[d.ValidatorIndex] = true - } + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() return fetchDutiesCall, executeDutiesCall } -func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*v1.ProposerDuty) []*spectypes.Duty { +func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*eth2apiv1.ProposerDuty) []*spectypes.Duty { expectedDuties := make([]*spectypes.Duty, 0) for _, d := range duties { expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleProposer)) @@ -55,15 +57,16 @@ func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*v1.Propo func TestScheduler_Proposer_Same_Slot(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() - 
dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(0), @@ -87,15 +90,16 @@ func TestScheduler_Proposer_Same_Slot(t *testing.T) { func TestScheduler_Proposer_Diff_Slots(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -129,13 +133,14 @@ func TestScheduler_Proposer_Diff_Slots(t *testing.T) { // execute duty after two slots after the indices changed func TestScheduler_Proposer_Indices_Changed(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() // STEP 1: wait for no action to be taken 
ticker.Send(currentSlot.GetSlot()) @@ -148,7 +153,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { // STEP 3: trigger a change in active indices scheduler.indicesChg <- struct{}{} - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(1), @@ -178,7 +183,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { // STEP 4: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[2]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -191,15 +196,16 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -215,7 +221,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { scheduler.indicesChg <- struct{}{} waitForNoAction(t, logger, 
fetchDutiesCall, executeDutiesCall, timeout) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(3), ValidatorIndex: phase0.ValidatorIndex(2), @@ -225,7 +231,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { scheduler.indicesChg <- struct{}{} waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 5}, Slot: phase0.Slot(4), ValidatorIndex: phase0.ValidatorIndex(3), @@ -239,7 +245,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 5: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(2)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[0]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -248,7 +254,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 6: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[1]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -257,7 +263,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 7: wait for proposer duties to be executed
currentSlot.SetSlot(phase0.Slot(4)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[2]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -271,15 +277,16 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // reorg current dependent root changed func TestScheduler_Proposer_Reorg_Current(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(34)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -292,8 +299,8 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -307,13 +314,13 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: 
currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(37), @@ -346,15 +353,16 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { // reorg current dependent root changed func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(34)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) + startFn() - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -367,8 +375,8 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -382,13 +390,13 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - 
dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(37), @@ -401,7 +409,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 5: trigger a change in active indices in the same slot scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(1)) - dutiesMap.Set(phase0.Epoch(1), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(38), ValidatorIndex: phase0.ValidatorIndex(2), @@ -417,7 +425,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 7: The second assigned duty should be executed currentSlot.SetSlot(phase0.Slot(37)) duties, _ = dutiesMap.Get(phase0.Epoch(1)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[0]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -426,7 +434,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 8: The second assigned duty should be executed currentSlot.SetSlot(phase0.Slot(38)) duties, _ = dutiesMap.Get(phase0.Epoch(1)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[1]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) diff --git a/operator/duties/scheduler.go b/operator/duties/scheduler.go index cb1f5861c6..70870d60b8 100644 --- a/operator/duties/scheduler.go +++ b/operator/duties/scheduler.go @@ -11,6 +11,8 @@ import ( eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes 
"github.com/bloxapp/ssv-spec/types" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/sourcegraph/conc/pool" "go.uber.org/zap" @@ -19,11 +21,26 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/slotticker" "github.com/bloxapp/ssv/protocol/v2/types" ) //go:generate mockgen -package=mocks -destination=./mocks/scheduler.go -source=./scheduler.go +var slotDelayHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Name: "slot_ticker_delay_milliseconds", + Help: "The delay in milliseconds of the slot ticker", + Buckets: []float64{5, 10, 20, 100, 500, 5000}, // Buckets in milliseconds. Adjust as per your needs. +}) + +func init() { + logger := zap.L() + if err := prometheus.Register(slotDelayHistogram); err != nil { + logger.Debug("could not register prometheus collector") + } +} + const ( // blockPropagationDelay time to propagate around the nodes // before kicking off duties for the block's slot. 
@@ -31,7 +48,8 @@ const ( ) type SlotTicker interface { - Subscribe(subscription chan phase0.Slot) event.Subscription + Next() <-chan time.Time + Slot() phase0.Slot } type BeaconNode interface { @@ -45,7 +63,8 @@ type BeaconNode interface { // ValidatorController represents the component that controls validators via the scheduler type ValidatorController interface { - ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex GetOperatorShares() []*types.SSVShare } @@ -58,15 +77,16 @@ type SchedulerOptions struct { ValidatorController ValidatorController ExecuteDuty ExecuteDutyFunc IndicesChg chan struct{} - Ticker SlotTicker + SlotTickerProvider slotticker.Provider BuilderProposals bool + DutyStore *dutystore.Store } type Scheduler struct { beaconNode BeaconNode network networkconfig.NetworkConfig validatorController ValidatorController - slotTicker SlotTicker + slotTickerProvider slotticker.Provider executeDuty ExecuteDutyFunc builderProposals bool @@ -75,7 +95,7 @@ type Scheduler struct { reorg chan ReorgEvent indicesChg chan struct{} - ticker chan phase0.Slot + ticker slotticker.SlotTicker waitCond *sync.Cond pool *pool.ContextPool @@ -86,10 +106,15 @@ type Scheduler struct { } func NewScheduler(opts *SchedulerOptions) *Scheduler { + dutyStore := opts.DutyStore + if dutyStore == nil { + dutyStore = dutystore.New() + } + s := &Scheduler{ beaconNode: opts.BeaconNode, network: opts.Network, - slotTicker: opts.Ticker, + slotTickerProvider: opts.SlotTickerProvider, executeDuty: opts.ExecuteDuty, validatorController: opts.ValidatorController, builderProposals: opts.BuilderProposals, @@ -97,12 +122,12 @@ func NewScheduler(opts *SchedulerOptions) *Scheduler { blockPropagateDelay: blockPropagationDelay, handlers: []dutyHandler{ - NewAttesterHandler(), - NewProposerHandler(), - NewSyncCommitteeHandler(), + 
NewAttesterHandler(dutyStore.Attester), + NewProposerHandler(dutyStore.Proposer), + NewSyncCommitteeHandler(dutyStore.SyncCommittee), }, - ticker: make(chan phase0.Slot), + ticker: opts.SlotTickerProvider(), reorg: make(chan ReorgEvent), waitCond: sync.NewCond(&sync.Mutex{}), } @@ -118,6 +143,9 @@ type ReorgEvent struct { Current bool } +// Start initializes the Scheduler and begins its operation. +// Note: This function includes blocking operations, especially within the handler's HandleInitialDuties call, +// which will block until initial duties are fully handled. func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { logger = logger.Named(logging.NameDutyScheduler) logger.Info("duty scheduler started") @@ -135,8 +163,6 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { for _, handler := range s.handlers { handler := handler - slotTicker := make(chan phase0.Slot) - s.slotTicker.Subscribe(slotTicker) indicesChangeCh := make(chan struct{}) indicesChangeFeed.Subscribe(indicesChangeCh) @@ -150,11 +176,14 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { s.network, s.validatorController, s.ExecuteDuties, - slotTicker, + s.slotTickerProvider, reorgCh, indicesChangeCh, ) + // This call is blocking + handler.HandleInitialDuties(ctx) + s.pool.Go(func(ctx context.Context) error { // Wait for the head event subscription to complete before starting the handler. 
handler.HandleDuties(ctx) @@ -162,7 +191,6 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { }) } - s.slotTicker.Subscribe(s.ticker) go s.SlotTicker(ctx) go indicesChangeFeed.FanOut(ctx, s.indicesChg) @@ -214,7 +242,9 @@ func (s *Scheduler) SlotTicker(ctx context.Context) { select { case <-ctx.Done(): return - case slot := <-s.ticker: + case <-s.ticker.Next(): + slot := s.ticker.Slot() + delay := s.network.SlotDurationSec() / time.Duration(goclient.IntervalsPerSlot) /* a third of the slot duration */ finalTime := s.network.Beacon.GetSlotStartTime(slot).Add(delay) waitDuration := time.Until(finalTime) @@ -322,6 +352,11 @@ func (s *Scheduler) ExecuteDuties(logger *zap.Logger, duties []*spectypes.Duty) for _, duty := range duties { duty := duty logger := s.loggerWithDutyContext(logger, duty) + slotDelay := time.Since(s.network.Beacon.GetSlotStartTime(duty.Slot)) + if slotDelay >= 100*time.Millisecond { + logger.Debug("⚠️ late duty execution", zap.Int64("slot_delay", slotDelay.Milliseconds())) + } + slotDelayHistogram.Observe(float64(slotDelay.Milliseconds())) go func() { if duty.Type == spectypes.BNRoleAttester || duty.Type == spectypes.BNRoleSyncCommittee { s.waitOneThirdOrValidBlock(duty.Slot) diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index 342ba9e0cd..ba00907f1b 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -17,35 +17,81 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties/mocks" - mockslotticker "github.com/bloxapp/ssv/operator/slot_ticker/mocks" + "github.com/bloxapp/ssv/operator/slotticker" + mockslotticker "github.com/bloxapp/ssv/operator/slotticker/mocks" mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" ) +type MockSlotTicker interface { + Next() <-chan time.Time + Slot() phase0.Slot + Subscribe() chan phase0.Slot +} + type mockSlotTicker struct { - 
event.Feed + slotChan chan phase0.Slot + timeChan chan time.Time + slot phase0.Slot + mu sync.Mutex +} + +func NewMockSlotTicker() MockSlotTicker { + ticker := &mockSlotTicker{ + slotChan: make(chan phase0.Slot), + timeChan: make(chan time.Time), + } + ticker.start() + return ticker +} + +func (m *mockSlotTicker) start() { + go func() { + for slot := range m.slotChan { + m.mu.Lock() + m.slot = slot + m.mu.Unlock() + m.timeChan <- time.Now() + } + }() +} + +func (m *mockSlotTicker) Next() <-chan time.Time { + return m.timeChan } -func (m *mockSlotTicker) Subscribe(subscriber chan phase0.Slot) event.Subscription { - return m.Feed.Subscribe(subscriber) +func (m *mockSlotTicker) Slot() phase0.Slot { + m.mu.Lock() + defer m.mu.Unlock() + return m.slot +} + +func (m *mockSlotTicker) Subscribe() chan phase0.Slot { + return m.slotChan +} + +type mockSlotTickerService struct { + event.Feed } func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *SlotValue) ( *Scheduler, *zap.Logger, - *mockSlotTicker, + *mockSlotTickerService, time.Duration, context.CancelFunc, *pool.ContextPool, + func(), ) { ctrl := gomock.NewController(t) - timeout := 100 * time.Millisecond + // A 200ms timeout ensures the test passes, even with mockSlotTicker overhead. 
+ timeout := 200 * time.Millisecond ctx, cancel := context.WithCancel(context.Background()) logger := logging.TestLogger(t) mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := &mockSlotTicker{} + mockSlotService := &mockSlotTickerService{} mockNetworkConfig := networkconfig.NetworkConfig{ Beacon: mocknetwork.NewMockBeaconNetwork(ctrl), } @@ -55,8 +101,12 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot BeaconNode: mockBeaconNode, Network: mockNetworkConfig, ValidatorController: mockValidatorController, - Ticker: mockTicker, - BuilderProposals: false, + SlotTickerProvider: func() slotticker.SlotTicker { + ticker := NewMockSlotTicker() + mockSlotService.Subscribe(ticker.Subscribe()) + return ticker + }, + BuilderProposals: false, } s := NewScheduler(opts) @@ -94,16 +144,19 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot s.network.Beacon.(*mocknetwork.MockBeaconNetwork).EXPECT().EpochsPerSyncCommitteePeriod().Return(uint64(256)).AnyTimes() - err := s.Start(ctx, logger) - require.NoError(t, err) - // Create a pool to wait for the scheduler to finish. 
schedulerPool := pool.New().WithErrors().WithContext(ctx) - schedulerPool.Go(func(ctx context.Context) error { - return s.Wait() - }) - return s, logger, mockTicker, timeout, cancel, schedulerPool + startFunction := func() { + err := s.Start(ctx, logger) + require.NoError(t, err) + + schedulerPool.Go(func(ctx context.Context) error { + return s.Wait() + }) + } + + return s, logger, mockSlotService, timeout, cancel, schedulerPool, startFunction } func setExecuteDutyFunc(s *Scheduler, executeDutiesCall chan []*spectypes.Duty, executeDutiesCallSize int) { @@ -199,18 +252,23 @@ func TestScheduler_Run(t *testing.T) { mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := mockslotticker.NewMockTicker(ctrl) + mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers mockDutyHandler1 := NewMockdutyHandler(ctrl) mockDutyHandler2 := NewMockdutyHandler(ctrl) + mockDutyHandler1.EXPECT().HandleInitialDuties(gomock.Any()).AnyTimes() + mockDutyHandler2.EXPECT().HandleInitialDuties(gomock.Any()).AnyTimes() + opts := &SchedulerOptions{ Ctx: ctx, BeaconNode: mockBeaconNode, Network: networkconfig.TestNetwork, ValidatorController: mockValidatorController, - Ticker: mockTicker, BuilderProposals: false, + SlotTickerProvider: func() slotticker.SlotTicker { + return mockTicker + }, } s := NewScheduler(opts) @@ -218,7 +276,7 @@ func TestScheduler_Run(t *testing.T) { s.handlers = []dutyHandler{mockDutyHandler1, mockDutyHandler2} mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + mockTicker.EXPECT().Next().Return(nil).AnyTimes() // setup mock duty handler expectations for _, mockDutyHandler := range s.handlers { @@ -248,7 +306,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { mockBeaconNode := mocks.NewMockBeaconNode(ctrl) 
mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := mockslotticker.NewMockTicker(ctrl) + mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers opts := &SchedulerOptions{ @@ -256,8 +314,10 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { BeaconNode: mockBeaconNode, Network: networkconfig.TestNetwork, ValidatorController: mockValidatorController, - Ticker: mockTicker, - IndicesChg: make(chan struct{}), + SlotTickerProvider: func() slotticker.SlotTicker { + return mockTicker + }, + IndicesChg: make(chan struct{}), BuilderProposals: true, } @@ -267,7 +327,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { // add multiple mock duty handlers s.handlers = []dutyHandler{NewValidatorRegistrationHandler()} mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + mockTicker.EXPECT().Next().Return(nil).AnyTimes() err := s.Start(ctx, logger) require.NoError(t, err) diff --git a/operator/duties/synccommittee.go b/operator/duties/sync_committee.go similarity index 82% rename from operator/duties/synccommittee.go rename to operator/duties/sync_committee.go index 0569d7cbfd..e5fb76a25e 100644 --- a/operator/duties/synccommittee.go +++ b/operator/duties/sync_committee.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) // syncCommitteePreparationEpochs is the number of epochs ahead of the sync committee @@ -21,14 +22,14 @@ var syncCommitteePreparationEpochs = uint64(2) type SyncCommitteeHandler struct { baseHandler - duties *SyncCommitteeDuties + duties *dutystore.SyncCommitteeDuties fetchCurrentPeriod bool fetchNextPeriod bool } -func NewSyncCommitteeHandler() *SyncCommitteeHandler { +func NewSyncCommitteeHandler(duties *dutystore.SyncCommitteeDuties) *SyncCommitteeHandler { 
h := &SyncCommitteeHandler{ - duties: NewSyncCommitteeDuties(), + duties: duties, } h.fetchCurrentPeriod = true h.fetchFirst = true @@ -39,27 +40,6 @@ func (h *SyncCommitteeHandler) Name() string { return spectypes.BNRoleSyncCommittee.String() } -type SyncCommitteeDuties struct { - m map[uint64][]*eth2apiv1.SyncCommitteeDuty -} - -func NewSyncCommitteeDuties() *SyncCommitteeDuties { - return &SyncCommitteeDuties{ - m: make(map[uint64][]*eth2apiv1.SyncCommitteeDuty), - } -} - -func (d *SyncCommitteeDuties) Add(period uint64, duty *eth2apiv1.SyncCommitteeDuty) { - if _, ok := d.m[period]; !ok { - d.m[period] = []*eth2apiv1.SyncCommitteeDuty{} - } - d.m[period] = append(d.m[period], duty) -} - -func (d *SyncCommitteeDuties) Reset(period uint64) { - delete(d.m, period) -} - // HandleDuties manages the duty lifecycle, handling different cases: // // On First Run: @@ -73,7 +53,7 @@ func (d *SyncCommitteeDuties) Reset(period uint64) { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current period. +// 2. ResetEpoch duties for the current period. // 3. Fetch duties for the current period. // 4. If necessary, fetch duties for the next period. 
// @@ -92,7 +72,8 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) period := h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) buildStr := fmt.Sprintf("p%v-%v-s%v-#%v", period, epoch, slot, slot%32+1) @@ -100,15 +81,10 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { if h.fetchFirst { h.fetchFirst = false - h.indicesChanged = false h.processFetching(ctx, period, slot) h.processExecution(period, slot) } else { h.processExecution(period, slot) - if h.indicesChanged { - h.duties.Reset(period) - h.indicesChanged = false - } h.processFetching(ctx, period, slot) } @@ -123,7 +99,7 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { // last slot of period if slot == h.network.Beacon.LastSlotOfSyncPeriod(period) { - h.duties.Reset(period) + h.duties.Reset(period - 1) } case reorgEvent := <-h.reorg: @@ -146,18 +122,27 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { buildStr := fmt.Sprintf("p%v-e%v-s%v-#%v", period, epoch, slot, slot%32+1) h.logger.Info("🔁 indices change received", zap.String("period_epoch_slot_seq", buildStr)) - h.indicesChanged = true h.fetchCurrentPeriod = true // reset next period duties if in appropriate slot range if h.shouldFetchNextPeriod(slot) { - h.duties.Reset(period + 1) h.fetchNextPeriod = true } } } } +func (h *SyncCommitteeHandler) HandleInitialDuties(ctx context.Context) { + slot := h.network.Beacon.EstimatedCurrentSlot() + epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) + period := h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) + h.processFetching(ctx, period, slot) + // At the init time we may not have enough duties to fetch + // we should not set those values to false in processFetching() call + h.fetchNextPeriod = true + h.fetchCurrentPeriod = true +} + func (h 
*SyncCommitteeHandler) processFetching(ctx context.Context, period uint64, slot phase0.Slot) { ctx, cancel := context.WithDeadline(ctx, h.network.Beacon.GetSlotStartTime(slot+1).Add(100*time.Millisecond)) defer cancel() @@ -181,16 +166,19 @@ func (h *SyncCommitteeHandler) processFetching(ctx context.Context, period uint6 func (h *SyncCommitteeHandler) processExecution(period uint64, slot phase0.Slot) { // range over duties and execute - if duties, ok := h.duties.m[period]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)*2) - for _, d := range duties { - if h.shouldExecute(d, slot) { - toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) - toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) - } + duties := h.duties.CommitteePeriodDuties(period) + if duties == nil { + return + } + + toExecute := make([]*spectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d, slot) { + toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) + toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) } - h.executeDuties(h.logger, toExecute) } + h.executeDuties(h.logger, toExecute) } func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period uint64) error { @@ -202,19 +190,26 @@ func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period } lastEpoch := h.network.Beacon.FirstEpochOfSyncPeriod(period+1) - 1 - indices := h.validatorController.ActiveValidatorIndices(firstEpoch) - - if len(indices) == 0 { + allActiveIndices := h.validatorController.AllActiveIndices(firstEpoch) + if len(allActiveIndices) == 0 { return nil } - duties, err := h.beaconNode.SyncCommitteeDuties(ctx, firstEpoch, indices) + inCommitteeIndices := h.validatorController.CommitteeActiveIndices(firstEpoch) + inCommitteeIndicesSet := map[phase0.ValidatorIndex]struct{}{} + for _, idx := range 
inCommitteeIndices { + inCommitteeIndicesSet[idx] = struct{}{} + } + + duties, err := h.beaconNode.SyncCommitteeDuties(ctx, firstEpoch, allActiveIndices) if err != nil { return fmt.Errorf("failed to fetch sync committee duties: %w", err) } + h.duties.Reset(period) for _, d := range duties { - h.duties.Add(period, d) + _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] + h.duties.Add(period, d.ValidatorIndex, d, inCommitteeDuty) } h.prepareDutiesResultLog(period, duties, start) @@ -276,8 +271,7 @@ func (h *SyncCommitteeHandler) shouldExecute(duty *eth2apiv1.SyncCommitteeDuty, return true } if currentSlot+1 == slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/synccommittee_test.go b/operator/duties/sync_committee_test.go similarity index 89% rename from operator/duties/synccommittee_test.go rename to operator/duties/sync_committee_test.go index 774cc2c2a5..76d04f8a58 100644 --- a/operator/duties/synccommittee_test.go +++ b/operator/duties/sync_committee_test.go @@ -12,6 +12,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" ) @@ -55,23 +56,24 @@ func setupSyncCommitteeDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[uint64, [ return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getDuties := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - period := 
s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) - duties, _ := dutiesMap.Get(period) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + period := s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) + duties, _ := dutiesMap.Get(period) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getDuties).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getDuties).AnyTimes() s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() @@ -89,13 +91,14 @@ func expectedExecutedSyncCommitteeDuties(handler *SyncCommitteeHandler, duties [ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(1)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ { @@ -148,13 +151,14 @@ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { func 
TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 49)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ { @@ -215,13 +219,14 @@ func TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ { @@ -269,13 +274,14 @@ func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, 
currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() // STEP 1: wait for no action to be taken ticker.Send(currentSlot.GetSlot()) @@ -327,13 +333,14 @@ func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T // reorg current dependent root changed func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(1, []*v1.SyncCommitteeDuty{ { @@ -399,13 +406,14 @@ func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { // reorg current dependent root changed including indices change in the same slot func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(256*32 - 3)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(1, 
[]*v1.SyncCommitteeDuty{ { @@ -479,13 +487,14 @@ func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { func TestScheduler_SyncCommittee_Early_Block(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) - scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) + scheduler, logger, ticker, timeout, cancel, schedulerPool, startFn := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupSyncCommitteeDutiesMock(scheduler, dutiesMap) + startFn() dutiesMap.Set(0, []*v1.SyncCommitteeDuty{ { diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index 2ac3a49ea3..e8b6b79210 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -6,22 +6,16 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "go.uber.org/zap" - - "github.com/bloxapp/ssv/logging/fields" ) const validatorRegistrationEpochInterval = uint64(10) type ValidatorRegistrationHandler struct { baseHandler - - validatorsPassedFirstRegistration map[string]struct{} } func NewValidatorRegistrationHandler() *ValidatorRegistrationHandler { - return &ValidatorRegistrationHandler{ - validatorsPassedFirstRegistration: map[string]struct{}{}, - } + return &ValidatorRegistrationHandler{} } func (h *ValidatorRegistrationHandler) Name() string { @@ -36,21 +30,19 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() shares := h.validatorController.GetOperatorShares() - sent := 0 + validators := []phase0.ValidatorIndex{} for _, share := range shares { 
- if !share.HasBeaconMetadata() { + if !share.HasBeaconMetadata() || !share.BeaconMetadata.IsAttesting() { continue } // if not passed first registration, should be registered within one epoch time in a corresponding slot // if passed first registration, should be registered within validatorRegistrationEpochInterval epochs time in a corresponding slot - registrationSlotInterval := h.network.SlotsPerEpoch() - if _, ok := h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)]; ok { - registrationSlotInterval *= validatorRegistrationEpochInterval - } + registrationSlotInterval := h.network.SlotsPerEpoch() * validatorRegistrationEpochInterval if uint64(share.BeaconMetadata.Index)%registrationSlotInterval != uint64(slot)%registrationSlotInterval { continue @@ -66,10 +58,11 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { // no need for other params }}) - sent++ - h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)] = struct{}{} + validators = append(validators, share.BeaconMetadata.Index) } - h.logger.Debug("validator registration duties sent", zap.Uint64("slot", uint64(slot)), fields.Count(sent)) + h.logger.Debug("validator registration duties sent", + zap.Uint64("slot", uint64(slot)), + zap.Any("validators", validators)) case <-h.indicesChange: continue diff --git a/operator/fee_recipient/controller.go b/operator/fee_recipient/controller.go index 477b40eed1..d44f20caca 100644 --- a/operator/fee_recipient/controller.go +++ b/operator/fee_recipient/controller.go @@ -10,7 +10,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/bloxapp/ssv/registry/storage" @@ -25,42 +25,40 @@ type RecipientController interface { // ControllerOptions holds the needed dependencies type 
ControllerOptions struct { - Ctx context.Context - BeaconClient beaconprotocol.BeaconNode - Network networkconfig.NetworkConfig - ShareStorage storage.Shares - RecipientStorage storage.Recipients - Ticker slot_ticker.Ticker - OperatorData *storage.OperatorData + Ctx context.Context + BeaconClient beaconprotocol.BeaconNode + Network networkconfig.NetworkConfig + ShareStorage storage.Shares + RecipientStorage storage.Recipients + SlotTickerProvider slotticker.Provider + OperatorData *storage.OperatorData } // recipientController implementation of RecipientController type recipientController struct { - ctx context.Context - beaconClient beaconprotocol.BeaconNode - network networkconfig.NetworkConfig - shareStorage storage.Shares - recipientStorage storage.Recipients - ticker slot_ticker.Ticker - operatorData *storage.OperatorData + ctx context.Context + beaconClient beaconprotocol.BeaconNode + network networkconfig.NetworkConfig + shareStorage storage.Shares + recipientStorage storage.Recipients + slotTickerProvider slotticker.Provider + operatorData *storage.OperatorData } func NewController(opts *ControllerOptions) *recipientController { return &recipientController{ - ctx: opts.Ctx, - beaconClient: opts.BeaconClient, - network: opts.Network, - shareStorage: opts.ShareStorage, - recipientStorage: opts.RecipientStorage, - ticker: opts.Ticker, - operatorData: opts.OperatorData, + ctx: opts.Ctx, + beaconClient: opts.BeaconClient, + network: opts.Network, + shareStorage: opts.ShareStorage, + recipientStorage: opts.RecipientStorage, + slotTickerProvider: opts.SlotTickerProvider, + operatorData: opts.OperatorData, } } func (rc *recipientController) Start(logger *zap.Logger) { - tickerChan := make(chan phase0.Slot, 32) - rc.ticker.Subscribe(tickerChan) - rc.listenToTicker(logger, tickerChan) + rc.listenToTicker(logger) } // listenToTicker loop over the given slot channel @@ -68,16 +66,19 @@ func (rc *recipientController) Start(logger *zap.Logger) { // in addition, 
submitting "same data" every slot is not efficient and can overload beacon node // instead we can subscribe to beacon node events and submit only when there is // a new fee recipient event (or new validator) was handled or when there is a syncing issue with beacon node -func (rc *recipientController) listenToTicker(logger *zap.Logger, slots chan phase0.Slot) { +func (rc *recipientController) listenToTicker(logger *zap.Logger) { firstTimeSubmitted := false - for currentSlot := range slots { + ticker := rc.slotTickerProvider() + for { + <-ticker.Next() + slot := ticker.Slot() // submit if first time or if first slot in epoch - if firstTimeSubmitted && uint64(currentSlot)%rc.network.SlotsPerEpoch() != (rc.network.SlotsPerEpoch()/2) { + if firstTimeSubmitted && uint64(slot)%rc.network.SlotsPerEpoch() != (rc.network.SlotsPerEpoch()/2) { continue } firstTimeSubmitted = true - err := rc.prepareAndSubmit(logger, currentSlot) + err := rc.prepareAndSubmit(logger, slot) if err != nil { logger.Warn("could not submit proposal preparations", zap.Error(err)) } diff --git a/operator/fee_recipient/controller_test.go b/operator/fee_recipient/controller_test.go index 02bf4144dd..6e1718afd6 100644 --- a/operator/fee_recipient/controller_test.go +++ b/operator/fee_recipient/controller_test.go @@ -13,13 +13,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/golang/mock/gomock" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/operator/slot_ticker/mocks" + "github.com/bloxapp/ssv/operator/slotticker" + "github.com/bloxapp/ssv/operator/slotticker/mocks" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -52,32 +52,47 @@ func TestSubmitProposal(t *testing.T) { t.Run("submit first time or 
halfway through epoch", func(t *testing.T) { numberOfRequests := 4 var wg sync.WaitGroup + wg.Add(numberOfRequests) // Set up the wait group before starting goroutines + client := beacon.NewMockBeaconNode(ctrl) client.EXPECT().SubmitProposalPreparation(gomock.Any()).DoAndReturn(func(feeRecipients map[phase0.ValidatorIndex]bellatrix.ExecutionAddress) error { wg.Done() return nil - }).MinTimes(numberOfRequests).MaxTimes(numberOfRequests) // call first time and on the halfway through epoch. each time should be 2 request as we have two batches + }).Times(numberOfRequests) - ticker := mocks.NewMockTicker(ctrl) - ticker.EXPECT().Subscribe(gomock.Any()).DoAndReturn(func(subscription chan phase0.Slot) event.Subscription { - subscription <- 1 // first time - time.Sleep(time.Millisecond * 500) - subscription <- 2 // should not call submit - time.Sleep(time.Millisecond * 500) - subscription <- 20 // should not call submit - time.Sleep(time.Millisecond * 500) - subscription <- phase0.Slot(network.SlotsPerEpoch()) / 2 // halfway through epoch - time.Sleep(time.Millisecond * 500) - subscription <- 63 // should not call submit - return nil - }) + ticker := mocks.NewMockSlotTicker(ctrl) + mockTimeChan := make(chan time.Time) + mockSlotChan := make(chan phase0.Slot) + ticker.EXPECT().Next().Return(mockTimeChan).AnyTimes() + ticker.EXPECT().Slot().DoAndReturn(func() phase0.Slot { + return <-mockSlotChan + }).AnyTimes() frCtrl.beaconClient = client - frCtrl.ticker = ticker + frCtrl.slotTickerProvider = func() slotticker.SlotTicker { + return ticker + } go frCtrl.Start(logger) - wg.Add(numberOfRequests) + + slots := []phase0.Slot{ + 1, // first time + 2, // should not call submit + 20, // should not call submit + phase0.Slot(network.SlotsPerEpoch()) / 2, // halfway through epoch + 63, // should not call submit + } + + for _, s := range slots { + mockTimeChan <- time.Now() + mockSlotChan <- s + time.Sleep(time.Millisecond * 500) + } + wg.Wait() + + close(mockTimeChan) // Close the 
channel after test + close(mockSlotChan) }) t.Run("error handling", func(t *testing.T) { @@ -88,18 +103,21 @@ func TestSubmitProposal(t *testing.T) { return errors.New("failed to submit") }).MinTimes(2).MaxTimes(2) - ticker := mocks.NewMockTicker(ctrl) - ticker.EXPECT().Subscribe(gomock.Any()).DoAndReturn(func(subscription chan phase0.Slot) event.Subscription { - subscription <- 100 // first time - return nil - }) + ticker := mocks.NewMockSlotTicker(ctrl) + mockTimeChan := make(chan time.Time, 1) + ticker.EXPECT().Next().Return(mockTimeChan).AnyTimes() + ticker.EXPECT().Slot().Return(phase0.Slot(100)).AnyTimes() frCtrl.beaconClient = client - frCtrl.ticker = ticker + frCtrl.slotTickerProvider = func() slotticker.SlotTicker { + return ticker + } go frCtrl.Start(logger) + mockTimeChan <- time.Now() wg.Add(2) wg.Wait() + close(mockTimeChan) }) } diff --git a/operator/node.go b/operator/node.go index 3dc3589349..55b8fde4cf 100644 --- a/operator/node.go +++ b/operator/node.go @@ -15,8 +15,9 @@ import ( "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/fee_recipient" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" @@ -40,18 +41,16 @@ type Options struct { DB basedb.Database ValidatorController validator.Controller ValidatorOptions validator.ControllerOptions `yaml:"ValidatorOptions"` - - WS api.WebSocketServer - WsAPIPort int - - Metrics nodeMetrics + DutyStore *dutystore.Store + WS api.WebSocketServer + WsAPIPort int + Metrics nodeMetrics } // operatorNode implements Node interface type operatorNode struct { network networkconfig.NetworkConfig context context.Context - ticker slot_ticker.Ticker validatorsCtrl 
validator.Controller consensusClient beaconprotocol.BeaconNode executionClient *executionclient.ExecutionClient @@ -68,7 +67,7 @@ type operatorNode struct { } // New is the constructor of operatorNode -func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { +func New(logger *zap.Logger, opts Options, slotTickerProvider slotticker.Provider) Node { storageMap := qbftstorage.NewStores() roles := []spectypes.BeaconRole{ @@ -85,7 +84,6 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { node := &operatorNode{ context: opts.Context, - ticker: slotTicker, validatorsCtrl: opts.ValidatorController, network: opts.Network, consensusClient: opts.BeaconNode, @@ -100,17 +98,18 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { ValidatorController: opts.ValidatorController, IndicesChg: opts.ValidatorController.IndicesChangeChan(), ExecuteDuty: opts.ValidatorController.ExecuteDuty, - Ticker: slotTicker, BuilderProposals: opts.ValidatorOptions.BuilderProposals, + DutyStore: opts.DutyStore, + SlotTickerProvider: slotTickerProvider, }), feeRecipientCtrl: fee_recipient.NewController(&fee_recipient.ControllerOptions{ - Ctx: opts.Context, - BeaconClient: opts.BeaconNode, - Network: opts.Network, - ShareStorage: opts.ValidatorOptions.RegistryStorage.Shares(), - RecipientStorage: opts.ValidatorOptions.RegistryStorage, - Ticker: slotTicker, - OperatorData: opts.ValidatorOptions.OperatorData, + Ctx: opts.Context, + BeaconClient: opts.BeaconNode, + Network: opts.Network, + ShareStorage: opts.ValidatorOptions.RegistryStorage.Shares(), + RecipientStorage: opts.ValidatorOptions.RegistryStorage, + OperatorData: opts.ValidatorOptions.OperatorData, + SlotTickerProvider: slotTickerProvider, }), ws: opts.WS, @@ -140,7 +139,12 @@ func (n *operatorNode) Start(logger *zap.Logger) error { } }() - go n.ticker.Start(logger) + // Start the duty scheduler, and a background goroutine to crash the node + // in case there 
were any errors. + if err := n.dutyScheduler.Start(n.context, logger); err != nil { + return fmt.Errorf("failed to run duty scheduler: %w", err) + } + n.validatorsCtrl.StartNetworkHandlers() n.validatorsCtrl.StartValidators() go n.net.UpdateSubnets(logger) @@ -149,12 +153,6 @@ func (n *operatorNode) Start(logger *zap.Logger) error { go n.feeRecipientCtrl.Start(logger) go n.validatorsCtrl.UpdateValidatorMetaDataLoop() - // Start the duty scheduler, and a background goroutine to crash the node - // in case there were any errors. - if err := n.dutyScheduler.Start(n.context, logger); err != nil { - return fmt.Errorf("failed to run duty scheduler: %w", err) - } - if err := n.dutyScheduler.Wait(); err != nil { logger.Fatal("duty scheduler exited with error", zap.Error(err)) } diff --git a/operator/slot_ticker/mocks/ticker.go b/operator/slot_ticker/mocks/ticker.go deleted file mode 100644 index 2ed11c9fb9..0000000000 --- a/operator/slot_ticker/mocks/ticker.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./ticker.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" - gomock "github.com/golang/mock/gomock" - event "github.com/prysmaticlabs/prysm/v4/async/event" - zap "go.uber.org/zap" -) - -// MockTicker is a mock of Ticker interface. -type MockTicker struct { - ctrl *gomock.Controller - recorder *MockTickerMockRecorder -} - -// MockTickerMockRecorder is the mock recorder for MockTicker. -type MockTickerMockRecorder struct { - mock *MockTicker -} - -// NewMockTicker creates a new mock instance. -func NewMockTicker(ctrl *gomock.Controller) *MockTicker { - mock := &MockTicker{ctrl: ctrl} - mock.recorder = &MockTickerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockTicker) EXPECT() *MockTickerMockRecorder { - return m.recorder -} - -// Start mocks base method. -func (m *MockTicker) Start(logger *zap.Logger) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start", logger) -} - -// Start indicates an expected call of Start. -func (mr *MockTickerMockRecorder) Start(logger interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockTicker)(nil).Start), logger) -} - -// Subscribe mocks base method. -func (m *MockTicker) Subscribe(subscription chan phase0.Slot) event.Subscription { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subscribe", subscription) - ret0, _ := ret[0].(event.Subscription) - return ret0 -} - -// Subscribe indicates an expected call of Subscribe. -func (mr *MockTickerMockRecorder) Subscribe(subscription interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockTicker)(nil).Subscribe), subscription) -} diff --git a/operator/slot_ticker/slotticker.go b/operator/slot_ticker/slotticker.go deleted file mode 100644 index dbb1fc033e..0000000000 --- a/operator/slot_ticker/slotticker.go +++ /dev/null @@ -1,88 +0,0 @@ -package slot_ticker - -import ( - "time" - - "github.com/attestantio/go-eth2-client/spec/phase0" -) - -// The TTicker interface defines a type which can expose a -// receive-only channel firing slot events. -type TTicker interface { - C() <-chan phase0.Slot - Done() -} - -// SlotTicker is a special ticker for the beacon chain block. -// The channel emits over the slot interval, and ensures that -// the ticks are in line with the genesis time. This means that -// the duration between the ticks and the genesis time are always a -// multiple of the slot duration. -// In addition, the channel returns the new slot number. -type SlotTicker struct { - c chan phase0.Slot - done chan struct{} -} - -// C returns the ticker channel. 
Call Cancel afterwards to ensure -// that the goroutine exits cleanly. -func (s *SlotTicker) C() <-chan phase0.Slot { - return s.c -} - -// Done should be called to clean up the ticker. -func (s *SlotTicker) Done() { - go func() { - s.done <- struct{}{} - }() -} - -// NewSlotTicker starts and returns a new SlotTicker instance. -func NewSlotTicker(genesisTime time.Time, secondsPerSlot uint64) *SlotTicker { - if genesisTime.IsZero() { - panic("zero genesis time") - } - ticker := &SlotTicker{ - c: make(chan phase0.Slot), - done: make(chan struct{}), - } - ticker.start(genesisTime, secondsPerSlot, time.Since, time.Until, time.After) - return ticker -} - -func (s *SlotTicker) start( - genesisTime time.Time, - secondsPerSlot uint64, - since, until func(time.Time) time.Duration, - after func(time.Duration) <-chan time.Time) { - - d := time.Duration(secondsPerSlot) * time.Second - - go func() { - sinceGenesis := since(genesisTime) - - var nextTickTime time.Time - var slot phase0.Slot - if sinceGenesis < d { - // Handle when the current time is before the genesis time. 
- nextTickTime = genesisTime - slot = 0 - } else { - nextTick := sinceGenesis.Truncate(d) + d - nextTickTime = genesisTime.Add(nextTick) - slot = phase0.Slot(nextTick / d) - } - - for { - waitTime := until(nextTickTime) - select { - case <-after(waitTime): - s.c <- slot - slot++ - nextTickTime = nextTickTime.Add(d) - case <-s.done: - return - } - } - }() -} diff --git a/operator/slot_ticker/ticker.go b/operator/slot_ticker/ticker.go deleted file mode 100644 index 06cbe39604..0000000000 --- a/operator/slot_ticker/ticker.go +++ /dev/null @@ -1,84 +0,0 @@ -package slot_ticker - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/prysmaticlabs/prysm/v4/async/event" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/networkconfig" -) - -//go:generate mockgen -package=mocks -destination=./mocks/ticker.go -source=./ticker.go - -type Ticker interface { - // Start ticker process - Start(logger *zap.Logger) - // Subscribe to ticker chan - Subscribe(subscription chan phase0.Slot) event.Subscription -} - -type ticker struct { - ctx context.Context - network networkconfig.NetworkConfig - - // chan - feed *event.Feed -} - -// NewTicker returns Ticker struct pointer -func NewTicker(ctx context.Context, network networkconfig.NetworkConfig) Ticker { - return &ticker{ - ctx: ctx, - network: network, - feed: &event.Feed{}, - } -} - -// Start slot ticker -func (t *ticker) Start(logger *zap.Logger) { - genesisTime := time.Unix(int64(t.network.Beacon.MinGenesisTime()), 0) - slotTicker := NewSlotTicker(genesisTime, uint64(t.network.SlotDurationSec().Seconds())) - t.listenToTicker(logger, slotTicker.C()) -} - -// Subscribe will trigger every slot -func (t *ticker) Subscribe(subscription chan phase0.Slot) event.Subscription { - return t.feed.Subscribe(subscription) -} - -// listenToTicker loop over the given slot channel -func (t *ticker) listenToTicker(logger *zap.Logger, slots <-chan phase0.Slot) { - for currentSlot := range slots { - 
currentEpoch := t.network.Beacon.EstimatedEpochAtSlot(currentSlot) - buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, currentSlot, currentSlot%32+1) - logger.Debug("📅 slot ticker", zap.String("epoch_slot_seq", buildStr)) - if !t.genesisEpochEffective(logger) { - continue - } - // notify current slot to channel - _ = t.feed.Send(currentSlot) - } -} - -func (t *ticker) genesisEpochEffective(logger *zap.Logger) bool { - curSlot := t.network.Beacon.EstimatedCurrentSlot() - genSlot := t.network.Beacon.GetEpochFirstSlot(t.network.GenesisEpoch) - if curSlot < genSlot { - if t.network.Beacon.IsFirstSlotOfEpoch(curSlot) { - // wait until genesis epoch starts - curEpoch := t.network.Beacon.EstimatedCurrentEpoch() - gnsTime := t.network.Beacon.GetSlotStartTime(genSlot) - logger.Info("duties paused, will resume duties on genesis epoch", - zap.Uint64("genesis_epoch", uint64(t.network.GenesisEpoch)), - zap.Uint64("current_epoch", uint64(curEpoch)), - zap.String("genesis_time", gnsTime.Format(time.UnixDate))) - } - return false - } - - return true -} diff --git a/operator/slotticker/mocks/slotticker.go b/operator/slotticker/mocks/slotticker.go new file mode 100644 index 0000000000..f8e56df5b1 --- /dev/null +++ b/operator/slotticker/mocks/slotticker.go @@ -0,0 +1,115 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./slotticker.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + time "time" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + gomock "github.com/golang/mock/gomock" +) + +// MockSlotTicker is a mock of SlotTicker interface. +type MockSlotTicker struct { + ctrl *gomock.Controller + recorder *MockSlotTickerMockRecorder +} + +// MockSlotTickerMockRecorder is the mock recorder for MockSlotTicker. +type MockSlotTickerMockRecorder struct { + mock *MockSlotTicker +} + +// NewMockSlotTicker creates a new mock instance. 
+func NewMockSlotTicker(ctrl *gomock.Controller) *MockSlotTicker { + mock := &MockSlotTicker{ctrl: ctrl} + mock.recorder = &MockSlotTickerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { + return m.recorder +} + +// Next mocks base method. +func (m *MockSlotTicker) Next() <-chan time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(<-chan time.Time) + return ret0 +} + +// Next indicates an expected call of Next. +func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) +} + +// Slot mocks base method. +func (m *MockSlotTicker) Slot() phase0.Slot { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Slot") + ret0, _ := ret[0].(phase0.Slot) + return ret0 +} + +// Slot indicates an expected call of Slot. +func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) +} + +// MockConfigProvider is a mock of ConfigProvider interface. +type MockConfigProvider struct { + ctrl *gomock.Controller + recorder *MockConfigProviderMockRecorder +} + +// MockConfigProviderMockRecorder is the mock recorder for MockConfigProvider. +type MockConfigProviderMockRecorder struct { + mock *MockConfigProvider +} + +// NewMockConfigProvider creates a new mock instance. +func NewMockConfigProvider(ctrl *gomock.Controller) *MockConfigProvider { + mock := &MockConfigProvider{ctrl: ctrl} + mock.recorder = &MockConfigProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockConfigProvider) EXPECT() *MockConfigProviderMockRecorder { + return m.recorder +} + +// GetGenesisTime mocks base method. 
+func (m *MockConfigProvider) GetGenesisTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGenesisTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetGenesisTime indicates an expected call of GetGenesisTime. +func (mr *MockConfigProviderMockRecorder) GetGenesisTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGenesisTime", reflect.TypeOf((*MockConfigProvider)(nil).GetGenesisTime)) +} + +// SlotDurationSec mocks base method. +func (m *MockConfigProvider) SlotDurationSec() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SlotDurationSec") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// SlotDurationSec indicates an expected call of SlotDurationSec. +func (mr *MockConfigProviderMockRecorder) SlotDurationSec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SlotDurationSec", reflect.TypeOf((*MockConfigProvider)(nil).SlotDurationSec)) +} diff --git a/operator/slotticker/slotticker.go b/operator/slotticker/slotticker.go new file mode 100644 index 0000000000..74e6511092 --- /dev/null +++ b/operator/slotticker/slotticker.go @@ -0,0 +1,96 @@ +package slotticker + +import ( + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +//go:generate mockgen -package=mocks -destination=./mocks/slotticker.go -source=./slotticker.go + +type Provider func() SlotTicker + +type SlotTicker interface { + Next() <-chan time.Time + Slot() phase0.Slot +} + +type ConfigProvider interface { + SlotDurationSec() time.Duration + GetGenesisTime() time.Time +} + +type Config struct { + slotDuration time.Duration + genesisTime time.Time +} + +func (cfg Config) SlotDurationSec() time.Duration { + return cfg.slotDuration +} + +func (cfg Config) GetGenesisTime() time.Time { + return cfg.genesisTime +} + +type slotTicker struct { + timer *time.Timer + slotDuration time.Duration + genesisTime time.Time + slot phase0.Slot +} + +// New 
returns a goroutine-free SlotTicker implementation which is not thread-safe. +func New(cfgProvider ConfigProvider) *slotTicker { + genesisTime := cfgProvider.GetGenesisTime() + slotDuration := cfgProvider.SlotDurationSec() + + now := time.Now() + timeSinceGenesis := now.Sub(genesisTime) + + var initialDelay time.Duration + if timeSinceGenesis < 0 { + // Genesis time is in the future + initialDelay = -timeSinceGenesis // Wait until the genesis time + } else { + slotsSinceGenesis := timeSinceGenesis / slotDuration + nextSlotStartTime := genesisTime.Add((slotsSinceGenesis + 1) * slotDuration) + initialDelay = time.Until(nextSlotStartTime) + } + + return &slotTicker{ + timer: time.NewTimer(initialDelay), + slotDuration: slotDuration, + genesisTime: genesisTime, + slot: 0, + } +} + +// Next returns a channel that signals when the next slot should start. +// Note: This function is not thread-safe and should be called in a serialized fashion. +// Make sure no concurrent calls happen, as it can result in unexpected behavior. +func (s *slotTicker) Next() <-chan time.Time { + timeSinceGenesis := time.Since(s.genesisTime) + if timeSinceGenesis < 0 { + return s.timer.C + } + if !s.timer.Stop() { + // try to drain the channel, but don't block if there's no value + select { + case <-s.timer.C: + default: + } + } + slotNumber := uint64(timeSinceGenesis / s.slotDuration) + nextSlotStartTime := s.genesisTime.Add(time.Duration(slotNumber+1) * s.slotDuration) + s.timer.Reset(time.Until(nextSlotStartTime)) + s.slot = phase0.Slot(slotNumber + 1) + return s.timer.C +} + +// Slot returns the current slot number. +// Note: Like the Next function, this method is also not thread-safe. +// It should be called in a serialized manner after calling Next. 
+func (s *slotTicker) Slot() phase0.Slot { + return s.slot +} diff --git a/operator/slotticker/slotticker_test.go b/operator/slotticker/slotticker_test.go new file mode 100644 index 0000000000..044e945829 --- /dev/null +++ b/operator/slotticker/slotticker_test.go @@ -0,0 +1,179 @@ +package slotticker + +import ( + "sync" + "testing" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/cornelk/hashmap/assert" + "github.com/stretchr/testify/require" +) + +func TestSlotTicker(t *testing.T) { + const numTicks = 3 + slotDuration := 200 * time.Millisecond + // Set the genesis time such that we start from slot 1 + genesisTime := time.Now().Truncate(slotDuration).Add(-slotDuration) + + // Calculate the expected starting slot based on genesisTime + timeSinceGenesis := time.Since(genesisTime) + expectedSlot := phase0.Slot(timeSinceGenesis/slotDuration) + 1 + + ticker := New(Config{slotDuration, genesisTime}) + + for i := 0; i < numTicks; i++ { + <-ticker.Next() + slot := ticker.Slot() + + require.Equal(t, expectedSlot, slot) + expectedSlot++ + } +} + +func TestTickerInitialization(t *testing.T) { + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + ticker := New(Config{slotDuration, genesisTime}) + + start := time.Now() + <-ticker.Next() + slot := ticker.Slot() + + // Allow a small buffer (e.g., 10ms) due to code execution overhead + buffer := 10 * time.Millisecond + + elapsed := time.Since(start) + assert.True(t, elapsed+buffer >= slotDuration, "First tick occurred too soon: %v", elapsed.String()) + require.Equal(t, phase0.Slot(1), slot) +} + +func TestSlotNumberConsistency(t *testing.T) { + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + + ticker := New(Config{slotDuration, genesisTime}) + var lastSlot phase0.Slot + + for i := 0; i < 10; i++ { + <-ticker.Next() + slot := ticker.Slot() + + require.Equal(t, lastSlot+1, slot) + lastSlot = slot + } +} + +func TestGenesisInFuture(t *testing.T) { + slotDuration := 
200 * time.Millisecond + genesisTime := time.Now().Add(1 * time.Second) // Setting genesis time 1s in the future + + ticker := New(Config{slotDuration, genesisTime}) + start := time.Now() + + <-ticker.Next() + + // The first tick should occur after the genesis time + expectedFirstTickDuration := genesisTime.Sub(start) + actualFirstTickDuration := time.Since(start) + + // Allow a small buffer (e.g., 10ms) due to code execution overhead + buffer := 10 * time.Millisecond + + assert.True(t, actualFirstTickDuration+buffer >= expectedFirstTickDuration, "First tick occurred too soon. Expected at least: %v, but got: %v", expectedFirstTickDuration.String(), actualFirstTickDuration.String()) +} + +func TestBoundedDrift(t *testing.T) { + slotDuration := 20 * time.Millisecond + genesisTime := time.Now() + + ticker := New(Config{slotDuration, genesisTime}) + ticks := 100 + + start := time.Now() + for i := 0; i < ticks; i++ { + <-ticker.Next() + } + expectedDuration := time.Duration(ticks) * slotDuration + elapsed := time.Since(start) + + // We'll allow a small buffer for drift, say 1% + buffer := expectedDuration * 1 / 100 + assert.True(t, elapsed >= expectedDuration-buffer && elapsed <= expectedDuration+buffer, "Drifted too far from expected time. 
Expected: %v, Actual: %v", expectedDuration.String(), elapsed.String()) +} + +func TestMultipleSlotTickers(t *testing.T) { + const ( + numTickers = 1000 + ticksPerTimer = 3 + ) + + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + + // Start the clock to time the full execution of all tickers + start := time.Now() + + var wg sync.WaitGroup + wg.Add(numTickers) + + for i := 0; i < numTickers; i++ { + go func() { + defer wg.Done() + ticker := New(Config{slotDuration, genesisTime}) + for j := 0; j < ticksPerTimer; j++ { + <-ticker.Next() + } + }() + } + + wg.Wait() + + // Calculate the total time taken for all tickers to complete their ticks + elapsed := time.Since(start) + expectedDuration := slotDuration * ticksPerTimer + + // We'll allow a small buffer for drift, say 5% + buffer := expectedDuration * 5 / 100 + assert.True(t, elapsed <= expectedDuration+buffer, "Expected all tickers to complete within", expectedDuration.String(), "but took", elapsed.String()) +} + +func TestSlotSkipping(t *testing.T) { + const ( + numTicks = 100 + skipInterval = 10 // Introduce a delay every 10 ticks + slotDuration = 20 * time.Millisecond + ) + + genesisTime := time.Now() + ticker := New(Config{slotDuration, genesisTime}) + + var lastSlot phase0.Slot + for i := 1; i <= numTicks; i++ { // Starting loop from 1 for ease of skipInterval check + select { + case <-ticker.Next(): + slot := ticker.Slot() + + // Ensure we never receive slots out of order or repeatedly + require.Equal(t, slot, lastSlot+1, "Expected slot %d to be one more than the last slot %d", slot, lastSlot) + lastSlot = slot + + // If it's the 10th tick or any multiple thereof + if i%skipInterval == 0 { + // Introduce delay to skip a slot + time.Sleep(slotDuration) + + // Ensure the next slot we receive is exactly 2 slots ahead of the previous slot + <-ticker.Next() + slotAfterDelay := ticker.Slot() + require.Equal(t, lastSlot+2, slotAfterDelay, "Expected to skip a slot after introducing a delay") + + // 
Update the slot variable to use this new slot for further iterations + lastSlot = slotAfterDelay + } + + case <-time.After(2 * slotDuration): // Fail if we don't get a tick within a reasonable time + t.Fatalf("Did not receive expected tick for iteration %d", i) + } + } +} diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 17dcfddc82..366b4dba0e 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -5,6 +5,7 @@ import ( "crypto/rsa" "encoding/hex" "encoding/json" + "fmt" "sync" "time" @@ -22,8 +23,10 @@ import ( "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" nodestorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validatorsmap" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" @@ -34,7 +37,6 @@ import ( "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/sync/handlers" "github.com/bloxapp/ssv/protocol/v2/types" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -76,7 +78,9 @@ type ControllerOptions struct { NewDecidedHandler qbftcontroller.NewDecidedHandler DutyRoles []spectypes.BeaconRole StorageMap *storage.QBFTStores - Metrics validatorMetrics + Metrics validator.Metrics + MessageValidator validation.MessageValidator + ValidatorsMap *validatorsmap.ValidatorsMap // worker flags WorkersCount int `yaml:"MsgWorkersCount" env:"MSG_WORKERS_COUNT" env-default:"256" env-description:"Number of goroutines to use for message workers"` @@ -88,7 +92,8 @@ type ControllerOptions struct { // it takes care of bootstrapping, updating 
and managing existing validators and their shares type Controller interface { StartValidators() - ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex GetValidator(pubKey string) (*validator.Validator, bool) ExecuteDuty(logger *zap.Logger, duty *spectypes.Duty) UpdateValidatorMetaDataLoop() @@ -104,7 +109,7 @@ type Controller interface { IndicesChangeChan() chan struct{} StartValidator(share *ssvtypes.SSVShare) error - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient common.Address) error @@ -120,7 +125,7 @@ type controller struct { context context.Context logger *zap.Logger - metrics validatorMetrics + metrics validator.Metrics sharesStorage registrystorage.Shares operatorsStorage registrystorage.Operators @@ -134,8 +139,8 @@ type controller struct { operatorData *registrystorage.OperatorData operatorDataMutex sync.RWMutex - validatorsMap *validatorsMap - validatorOptions *validator.Options + validatorsMap *validatorsmap.ValidatorsMap + validatorOptions validator.Options metadataUpdateInterval time.Duration @@ -144,6 +149,7 @@ type controller struct { messageRouter *messageRouter messageWorker *worker.Worker historySyncBatchSize int + messageValidator validation.MessageValidator // nonCommittees is a cache of initialized nonCommitteeValidator instances nonCommitteeValidators *ttlcache.Cache[spectypes.MessageID, *nonCommitteeValidator] @@ -156,7 +162,7 @@ type controller struct { // NewController creates a new validator controller instance func NewController(logger *zap.Logger, options ControllerOptions) Controller { - logger.Debug("setting 
validator controller") + logger.Debug("setting up validator controller") // lookup in a map that holds all relevant operators operatorsIDs := &sync.Map{} @@ -167,10 +173,10 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { Buffer: options.QueueBufferSize, } - validatorOptions := &validator.Options{ //TODO add vars + validatorOptions := validator.Options{ //TODO add vars Network: options.Network, Beacon: options.Beacon, - BeaconNetwork: options.BeaconNetwork.BeaconNetwork, + BeaconNetwork: options.BeaconNetwork.GetNetwork(), Storage: options.StorageMap, //Share: nil, // set per validator Signer: options.KeyManager, @@ -181,6 +187,8 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { Exporter: options.Exporter, BuilderProposals: options.BuilderProposals, GasLimit: options.GasLimit, + MessageValidator: options.MessageValidator, + Metrics: options.Metrics, } // If full node, increase queue size to make enough room @@ -192,13 +200,14 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { } } - if options.Metrics == nil { - options.Metrics = nopMetrics{} + metrics := validator.Metrics(validator.NopMetrics{}) + if options.Metrics != nil { + metrics = options.Metrics } ctrl := controller{ logger: logger.Named(logging.NameController), - metrics: options.Metrics, + metrics: metrics, sharesStorage: options.RegistryStorage.Shares(), operatorsStorage: options.RegistryStorage, recipientsStorage: options.RegistryStorage, @@ -210,14 +219,14 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { keyManager: options.KeyManager, network: options.Network, - validatorsMap: newValidatorsMap(options.Context, validatorOptions), + validatorsMap: options.ValidatorsMap, validatorOptions: validatorOptions, metadataUpdateInterval: options.MetadataUpdateInterval, operatorsIDs: operatorsIDs, - messageRouter: newMessageRouter(), + messageRouter: newMessageRouter(logger), 
messageWorker: worker.NewWorker(logger, workerCfg), historySyncBatchSize: options.HistorySyncBatchSize, @@ -226,6 +235,8 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { ), metadataLastUpdated: make(map[string]time.Time), indicesChange: make(chan struct{}), + + messageValidator: options.MessageValidator, } // Start automatic expired item deletion in nonCommitteeValidators. @@ -236,22 +247,7 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { // setupNetworkHandlers registers all the required handlers for sync protocols func (c *controller) setupNetworkHandlers() error { - syncHandlers := []*p2pprotocol.SyncHandler{ - p2pprotocol.WithHandler( - p2pprotocol.LastDecidedProtocol, - handlers.LastDecidedHandler(c.logger, c.ibftStorageMap, c.network), - ), - } - if c.validatorOptions.FullNode { - syncHandlers = append( - syncHandlers, - p2pprotocol.WithHandler( - p2pprotocol.DecidedHistoryProtocol, - // TODO: extract maxBatch to config - handlers.HistoryHandler(c.logger, c.ibftStorageMap, c.network, c.historySyncBatchSize), - ), - ) - } + syncHandlers := []*p2pprotocol.SyncHandler{} c.logger.Debug("setting up network handlers", zap.Int("count", len(syncHandlers)), zap.Bool("full_node", c.validatorOptions.FullNode), @@ -315,12 +311,12 @@ func (c *controller) handleRouterMessages() { pk := msg.GetID().GetPubKey() hexPK := hex.EncodeToString(pk) if v, ok := c.validatorsMap.GetValidator(hexPK); ok { - v.HandleMessage(c.logger, &msg) - } else { + v.HandleMessage(c.logger, msg) + } else if c.validatorOptions.Exporter { if msg.MsgType != spectypes.SSVConsensusMsgType { continue // not supporting other types } - if !c.messageWorker.TryEnqueue(&msg) { // start to save non committee decided messages only post fork + if !c.messageWorker.TryEnqueue(msg) { // start to save non committee decided messages only post fork c.logger.Warn("Failed to enqueue post consensus message: buffer is full") } } @@ -336,7 +332,7 @@ var 
nonCommitteeValidatorTTLs = map[spectypes.BeaconRole]phase0.Slot{ spectypes.BNRoleSyncCommitteeContribution: 4, } -func (c *controller) handleWorkerMessages(msg *spectypes.SSVMessage) error { +func (c *controller) handleWorkerMessages(msg *queue.DecodedSSVMessage) error { // Get or create a nonCommitteeValidator for this MessageID, and lock it to prevent // other handlers from processing var ncv *nonCommitteeValidator @@ -354,7 +350,7 @@ func (c *controller) handleWorkerMessages(msg *spectypes.SSVMessage) error { return errors.Errorf("could not find validator [%s]", hex.EncodeToString(msg.GetID().GetPubKey())) } - opts := *c.validatorOptions + opts := c.validatorOptions opts.SSVShare = share ncv = &nonCommitteeValidator{ NonCommitteeValidator: validator.NewNonCommitteeValidator(c.logger, msg.GetID(), opts), @@ -389,12 +385,38 @@ func (c *controller) StartValidators() { return } - shares := c.sharesStorage.List(nil, registrystorage.ByOperatorID(c.GetOperatorData().ID), registrystorage.ByNotLiquidated()) + shares := c.sharesStorage.List(nil, registrystorage.ByNotLiquidated()) if len(shares) == 0 { c.logger.Info("could not find validators") return } - c.setupValidators(shares) + + var ownShares []*ssvtypes.SSVShare + var allPubKeys = make([][]byte, 0, len(shares)) + ownOpID := c.GetOperatorData().ID + for _, share := range shares { + if share.BelongsToOperator(ownOpID) { + ownShares = append(ownShares, share) + } + allPubKeys = append(allPubKeys, share.ValidatorPubKey) + } + + // Start own validators. + c.setupValidators(ownShares) + + // Fetch metadata for all validators. 
+ start := time.Now() + err := beaconprotocol.UpdateValidatorsMetadata(c.logger, allPubKeys, c, c.beacon, c.onMetadataUpdated) + if err != nil { + c.logger.Error("failed to update validators metadata after setup", + zap.Int("shares", len(allPubKeys)), + fields.Took(time.Since(start)), + zap.Error(err)) + } else { + c.logger.Debug("updated validators metadata after setup", + zap.Int("shares", len(allPubKeys)), + fields.Took(time.Since(start))) + } } // setupValidators setup and starts validators from the given shares. @@ -421,22 +443,6 @@ func (c *controller) setupValidators(shares []*ssvtypes.SSVShare) { c.logger.Info("setup validators done", zap.Int("map size", c.validatorsMap.Size()), zap.Int("failures", len(errs)), zap.Int("missing_metadata", len(fetchMetadata)), zap.Int("shares", len(shares)), zap.Int("started", started)) - - // Try to fetch metadata once for validators that don't have it. - if len(fetchMetadata) > 0 { - start := time.Now() - err := beaconprotocol.UpdateValidatorsMetadata(c.logger, fetchMetadata, c, c.beacon, c.onMetadataUpdated) - if err != nil { - c.logger.Error("failed to update validators metadata after setup", - zap.Int("shares", len(fetchMetadata)), - fields.Took(time.Since(start)), - zap.Error(err)) - } else { - c.logger.Debug("updated validators metadata after setup", - zap.Int("shares", len(fetchMetadata)), - fields.Took(time.Since(start))) - } - } } // setupNonCommitteeValidators trigger SyncHighestDecided for each validator @@ -459,25 +465,7 @@ func (c *controller) setupNonCommitteeValidators() { pubKeys := make([][]byte, 0, len(nonCommitteeShares)) for _, validatorShare := range nonCommitteeShares { pubKeys = append(pubKeys, validatorShare.ValidatorPubKey) - - opts := *c.validatorOptions - opts.SSVShare = validatorShare - allRoles := []spectypes.BeaconRole{ - spectypes.BNRoleAttester, - spectypes.BNRoleAggregator, - spectypes.BNRoleProposer, - spectypes.BNRoleSyncCommittee, - spectypes.BNRoleSyncCommitteeContribution, - } - for _, 
role := range allRoles { - messageID := spectypes.NewMsgID(ssvtypes.GetDefaultDomain(), validatorShare.ValidatorPubKey, role) - err := c.network.SyncHighestDecided(messageID) - if err != nil { - c.logger.Error("failed to sync highest decided", zap.Error(err)) - } - } } - if len(pubKeys) > 0 { c.logger.Debug("updating metadata for non-committee validators", zap.Int("count", len(pubKeys))) if err := beaconprotocol.UpdateValidatorsMetadata(c.logger, pubKeys, c, c.beacon, c.onMetadataUpdated); err != nil { @@ -548,7 +536,7 @@ func (c *controller) UpdateValidatorMetadata(pk string, metadata *beaconprotocol return nil } -// GetValidator returns a validator instance from validatorsMap +// GetValidator returns a validator instance from ValidatorsMap func (c *controller) GetValidator(pubKey string) (*validator.Validator, bool) { return c.validatorsMap.GetValidator(pubKey) } @@ -565,7 +553,7 @@ func (c *controller) ExecuteDuty(logger *zap.Logger, duty *spectypes.Duty) { logger.Error("could not create duty execute msg", zap.Error(err)) return } - dec, err := queue.DecodeSSVMessage(logger, ssvMsg) + dec, err := queue.DecodeSSVMessage(ssvMsg) if err != nil { logger.Error("could not decode duty execute msg", zap.Error(err)) return @@ -601,25 +589,36 @@ func CreateDutyExecuteMsg(duty *spectypes.Duty, pubKey phase0.BLSPubKey, domain }, nil } -// ActiveValidatorIndices fetches indices of validators who are either attesting or queued and +// CommitteeActiveIndices fetches indices of in-committee validators who are either attesting or queued and // whose activation epoch is not greater than the passed epoch. It logs a warning if an error occurs. -func (c *controller) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { - indices := make([]phase0.ValidatorIndex, 0, len(c.validatorsMap.validatorsMap)) - err := c.validatorsMap.ForEach(func(v *validator.Validator) error { - // Beacon node throws error when trying to fetch duties for non-existing validators. 
- if (v.Share.BeaconMetadata.IsAttesting() || v.Share.BeaconMetadata.Status == v1.ValidatorStatePendingQueued) && - v.Share.BeaconMetadata.ActivationEpoch <= epoch { +func (c *controller) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + validators := c.validatorsMap.GetAll() + indices := make([]phase0.ValidatorIndex, 0, len(validators)) + for _, v := range validators { + if isShareActive(epoch)(v.Share) { indices = append(indices, v.Share.BeaconMetadata.Index) } - return nil - }) - if err != nil { - c.logger.Warn("failed to get all validators public keys", zap.Error(err)) } + return indices +} +func (c *controller) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + shares := c.sharesStorage.List(nil, isShareActive(epoch)) + indices := make([]phase0.ValidatorIndex, len(shares)) + for i, share := range shares { + indices[i] = share.BeaconMetadata.Index + } return indices } +func isShareActive(epoch phase0.Epoch) func(share *ssvtypes.SSVShare) bool { + return func(share *ssvtypes.SSVShare) bool { + return share != nil && share.BeaconMetadata != nil && + (share.BeaconMetadata.IsAttesting() || share.BeaconMetadata.Status == v1.ValidatorStatePendingQueued) && + share.BeaconMetadata.ActivationEpoch <= epoch + } +} + // onMetadataUpdated is called when validator's metadata was updated func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.ValidatorMetadata) { if meta == nil { @@ -645,24 +644,15 @@ func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.Validator } } -// onShareRemove is called when a validator was removed -// TODO: think how we can make this function atomic (i.e. 
failing wouldn't stop the removal of the share) -func (c *controller) onShareRemove(pk string, removeSecret bool) error { - // remove from validatorsMap - v := c.validatorsMap.RemoveValidator(pk) +// onShareStop is called when a validator was removed or liquidated +func (c *controller) onShareStop(pubKey spectypes.ValidatorPK) { + // remove from ValidatorsMap + v := c.validatorsMap.RemoveValidator(hex.EncodeToString(pubKey)) // stop instance if v != nil { v.Stop() } - // remove the share secret from key-manager - if removeSecret { - if err := c.keyManager.RemoveShare(pk); err != nil { - return errors.Wrap(err, "could not remove share secret from key manager") - } - } - - return nil } func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { @@ -672,23 +662,56 @@ func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { } if err := c.setShareFeeRecipient(share, c.recipientsStorage.GetRecipientData); err != nil { - return false, errors.Wrap(err, "could not set share fee recipient") + return false, fmt.Errorf("could not set share fee recipient: %w", err) } // Start a committee validator. - v, err := c.validatorsMap.GetOrCreateValidator(c.logger.Named("validatorsMap"), share) - if err != nil { - return false, errors.Wrap(err, "could not get or create validator") + v, found := c.validatorsMap.GetValidator(hex.EncodeToString(share.ValidatorPubKey)) + if !found { + if !share.HasBeaconMetadata() { + return false, fmt.Errorf("beacon metadata is missing") + } + + // Share context with both the validator and the runners, + // so that when the validator is stopped, the runners are stopped as well. 
+ ctx, cancel := context.WithCancel(c.context) + + opts := c.validatorOptions + opts.SSVShare = share + opts.DutyRunners = SetupRunners(ctx, c.logger, opts) + + v = validator.NewValidator(ctx, cancel, opts) + c.validatorsMap.CreateValidator(hex.EncodeToString(share.ValidatorPubKey), v) + + c.printShare(share, "setup validator done") + + } else { + c.printShare(v.Share, "get validator") } + return c.startValidator(v) } +func (c *controller) printShare(s *ssvtypes.SSVShare, msg string) { + committee := make([]string, len(s.Committee)) + for i, c := range s.Committee { + committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.OperatorID, c.PubKey) + } + c.logger.Debug(msg, + fields.PubKey(s.ValidatorPubKey), + zap.Uint64("node_id", s.OperatorID), + zap.Strings("committee", committee), + fields.FeeRecipient(s.FeeRecipientAddress[:]), + ) +} + func (c *controller) setShareFeeRecipient(share *ssvtypes.SSVShare, getRecipientData GetRecipientDataFunc) error { - var feeRecipient bellatrix.ExecutionAddress data, found, err := getRecipientData(nil, share.OwnerAddress) if err != nil { return errors.Wrap(err, "could not get recipient data") } + + var feeRecipient bellatrix.ExecutionAddress if !found { c.logger.Debug("setting fee recipient to owner address", fields.Validator(share.ValidatorPubKey), fields.FeeRecipient(share.OwnerAddress.Bytes())) @@ -727,11 +750,6 @@ func (c *controller) UpdateValidatorMetaDataLoop() { // Prepare share filters. filters := []registrystorage.SharesFilter{} - // Filter for validators who belong to our operator. - if !c.validatorOptions.Exporter { - filters = append(filters, registrystorage.ByOperatorID(c.GetOperatorData().ID)) - } - // Filter for validators who are not liquidated. 
filters = append(filters, registrystorage.ByNotLiquidated()) @@ -807,9 +825,10 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt //logger.Debug("leader", zap.Int("operator_id", int(leader))) return leader }, - Storage: options.Storage.Get(role), - Network: options.Network, - Timer: roundtimer.New(ctx, nil), + Storage: options.Storage.Get(role), + Network: options.Network, + Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), + SignatureVerification: true, } config.ValueCheckF = valueCheckF @@ -823,29 +842,29 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt for _, role := range runnersType { switch role { case spectypes.BNRoleAttester: - valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) + valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.BNRoleAttester, valCheck) - runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) + runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) case spectypes.BNRoleProposer: - proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey, options.BuilderProposals) + proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, 
options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.BNRoleProposer, proposedValueCheck) - runners[role] = runner.NewProposerRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) + runners[role] = runner.NewProposerRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) runners[role].(*runner.ProposerRunner).ProducesBlindedBlocks = options.BuilderProposals // apply blinded block flag case spectypes.BNRoleAggregator: - aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleAggregator, aggregatorValueCheckF) - runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, aggregatorValueCheckF, 0) + runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, aggregatorValueCheckF, 0) case spectypes.BNRoleSyncCommittee: - syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleSyncCommittee, syncCommitteeValueCheckF) - runners[role] = 
runner.NewSyncCommitteeRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeValueCheckF, 0) case spectypes.BNRoleSyncCommitteeContribution: - syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleSyncCommitteeContribution, syncCommitteeContributionValueCheckF) - runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) case spectypes.BNRoleValidatorRegistration: qbftCtrl := buildController(spectypes.BNRoleValidatorRegistration, nil) - runners[role] = runner.NewValidatorRegistrationRunner(spectypes.PraterNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) + runners[role] = runner.NewValidatorRegistrationRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) } } return runners diff --git a/operator/validator/controller_test.go b/operator/validator/controller_test.go index 6a06733db2..f0e5ccfb3a 100644 --- 
a/operator/validator/controller_test.go +++ b/operator/validator/controller_test.go @@ -7,17 +7,18 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec/phase0" - - "github.com/bloxapp/ssv/logging" - specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/queue/worker" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -28,41 +29,53 @@ import ( func TestHandleNonCommitteeMessages(t *testing.T) { logger := logging.TestLogger(t) ctr := setupController(logger, map[string]*validator.Validator{}) // none committee + + // Only exporter handles non committee messages + ctr.validatorOptions.Exporter = true + go ctr.handleRouterMessages() var wg sync.WaitGroup - ctr.messageWorker.UseHandler(func(msg *spectypes.SSVMessage) error { + ctr.messageWorker.UseHandler(func(msg *queue.DecodedSSVMessage) error { wg.Done() return nil }) wg.Add(2) - identifier := spectypes.NewMsgID(types.GetDefaultDomain(), []byte("pk"), spectypes.BNRoleAttester) + identifier := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte("pk"), spectypes.BNRoleAttester) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: identifier, - Data: generateDecidedMessage(t, identifier), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: identifier, + Data: generateDecidedMessage(t, identifier), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - 
MsgID: identifier, - Data: generateChangeRoundMsg(t, identifier), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: identifier, + Data: generateChangeRoundMsg(t, identifier), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ // checks that not process unnecessary message - MsgType: message.SSVSyncMsgType, - MsgID: identifier, - Data: []byte("data"), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ // checks that not process unnecessary message + MsgType: message.SSVSyncMsgType, + MsgID: identifier, + Data: []byte("data"), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ // checks that not process unnecessary message - MsgType: spectypes.SSVPartialSignatureMsgType, - MsgID: identifier, - Data: []byte("data"), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ // checks that not process unnecessary message + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: identifier, + Data: []byte("data"), + }, }) go func() { @@ -144,27 +157,25 @@ func TestGetIndices(t *testing.T) { logger := logging.TestLogger(t) ctr := setupController(logger, validators) - activeIndicesForCurrentEpoch := ctr.ActiveValidatorIndices(currentEpoch) + activeIndicesForCurrentEpoch := ctr.CommitteeActiveIndices(currentEpoch) require.Equal(t, 2, len(activeIndicesForCurrentEpoch)) // should return only active indices - activeIndicesForNextEpoch := ctr.ActiveValidatorIndices(currentEpoch + 1) + activeIndicesForNextEpoch := ctr.CommitteeActiveIndices(currentEpoch + 1) require.Equal(t, 3, len(activeIndicesForNextEpoch)) // should return including ValidatorStatePendingQueued } func setupController(logger *zap.Logger, validators map[string]*validator.Validator) controller { + validatorsMap := validatorsmap.New(context.TODO(), 
validatorsmap.WithInitialState(validators)) + return controller{ context: context.Background(), sharesStorage: nil, beacon: nil, keyManager: nil, shareEncryptionKeyProvider: nil, - validatorsMap: &validatorsMap{ - ctx: context.Background(), - lock: sync.RWMutex{}, - validatorsMap: validators, - }, - metadataUpdateInterval: 0, - messageRouter: newMessageRouter(), + validatorsMap: validatorsMap, + metadataUpdateInterval: 0, + messageRouter: newMessageRouter(logger), messageWorker: worker.NewWorker(logger, &worker.Config{ Ctx: context.Background(), WorkersCount: 1, diff --git a/operator/validator/metrics.go b/operator/validator/metrics.go index 2ab82cbfc4..d9cb36e817 100644 --- a/operator/validator/metrics.go +++ b/operator/validator/metrics.go @@ -33,31 +33,3 @@ func (c *controller) reportValidatorStatus(pk []byte, meta *beacon.ValidatorMeta c.metrics.ValidatorUnknown(pk) } } - -type validatorMetrics interface { - ValidatorInactive(publicKey []byte) - ValidatorNoIndex(publicKey []byte) - ValidatorError(publicKey []byte) - ValidatorReady(publicKey []byte) - ValidatorNotActivated(publicKey []byte) - ValidatorExiting(publicKey []byte) - ValidatorSlashed(publicKey []byte) - ValidatorNotFound(publicKey []byte) - ValidatorPending(publicKey []byte) - ValidatorRemoved(publicKey []byte) - ValidatorUnknown(publicKey []byte) -} - -type nopMetrics struct{} - -func (n nopMetrics) ValidatorInactive([]byte) {} -func (n nopMetrics) ValidatorNoIndex([]byte) {} -func (n nopMetrics) ValidatorError([]byte) {} -func (n nopMetrics) ValidatorReady([]byte) {} -func (n nopMetrics) ValidatorNotActivated([]byte) {} -func (n nopMetrics) ValidatorExiting([]byte) {} -func (n nopMetrics) ValidatorSlashed([]byte) {} -func (n nopMetrics) ValidatorNotFound([]byte) {} -func (n nopMetrics) ValidatorPending([]byte) {} -func (n nopMetrics) ValidatorRemoved([]byte) {} -func (n nopMetrics) ValidatorUnknown([]byte) {} diff --git a/operator/validator/mocks/controller.go 
b/operator/validator/mocks/controller.go index 6b743f6747..e7bad286b0 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -40,18 +40,32 @@ func (m *MockController) EXPECT() *MockControllerMockRecorder { return m.recorder } -// ActiveValidatorIndices mocks base method. -func (m *MockController) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { +// AllActiveIndices mocks base method. +func (m *MockController) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveValidatorIndices", epoch) + ret := m.ctrl.Call(m, "AllActiveIndices", epoch) ret0, _ := ret[0].([]phase0.ValidatorIndex) return ret0 } -// ActiveValidatorIndices indicates an expected call of ActiveValidatorIndices. -func (mr *MockControllerMockRecorder) ActiveValidatorIndices(epoch interface{}) *gomock.Call { +// AllActiveIndices indicates an expected call of AllActiveIndices. +func (mr *MockControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveValidatorIndices", reflect.TypeOf((*MockController)(nil).ActiveValidatorIndices), epoch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockController)(nil).AllActiveIndices), epoch) +} + +// CommitteeActiveIndices mocks base method. +func (m *MockController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. 
+func (mr *MockControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockController)(nil).CommitteeActiveIndices), epoch) } // ExecuteDuty mocks base method. @@ -219,17 +233,17 @@ func (mr *MockControllerMockRecorder) StartValidators() *gomock.Call { } // StopValidator mocks base method. -func (m *MockController) StopValidator(publicKey []byte) error { +func (m *MockController) StopValidator(pubKey types.ValidatorPK) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopValidator", publicKey) + ret := m.ctrl.Call(m, "StopValidator", pubKey) ret0, _ := ret[0].(error) return ret0 } // StopValidator indicates an expected call of StopValidator. -func (mr *MockControllerMockRecorder) StopValidator(publicKey interface{}) *gomock.Call { +func (mr *MockControllerMockRecorder) StopValidator(pubKey interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), publicKey) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), pubKey) } // UpdateFeeRecipient mocks base method. 
diff --git a/operator/validator/router.go b/operator/validator/router.go index 67ef8860a9..1d43c73b16 100644 --- a/operator/validator/router.go +++ b/operator/validator/router.go @@ -1,34 +1,37 @@ package validator import ( - spectypes "github.com/bloxapp/ssv-spec/types" + "context" + "go.uber.org/zap" - "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) const bufSize = 1024 -func newMessageRouter() *messageRouter { +func newMessageRouter(logger *zap.Logger) *messageRouter { return &messageRouter{ - ch: make(chan spectypes.SSVMessage, bufSize), - msgID: commons.MsgID(), + logger: logger, + ch: make(chan *queue.DecodedSSVMessage, bufSize), } } type messageRouter struct { - ch chan spectypes.SSVMessage - msgID commons.MsgIDFunc + logger *zap.Logger + ch chan *queue.DecodedSSVMessage } -func (r *messageRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { +func (r *messageRouter) Route(ctx context.Context, message *queue.DecodedSSVMessage) { select { + case <-ctx.Done(): + r.logger.Warn("context canceled, dropping message") case r.ch <- message: default: - logger.Warn("message router buffer is full. 
dropping message") + r.logger.Warn("message router buffer is full, dropping message") } } -func (r *messageRouter) GetMessageChan() <-chan spectypes.SSVMessage { +func (r *messageRouter) GetMessageChan() <-chan *queue.DecodedSSVMessage { return r.ch } diff --git a/operator/validator/router_test.go b/operator/validator/router_test.go index 787e2b988d..44b3798cac 100644 --- a/operator/validator/router_test.go +++ b/operator/validator/router_test.go @@ -10,7 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func TestRouter(t *testing.T) { @@ -19,7 +20,7 @@ func TestRouter(t *testing.T) { logger := logging.TestLogger(t) - router := newMessageRouter() + router := newMessageRouter(logger) expectedCount := 1000 count := 0 @@ -40,14 +41,17 @@ func TestRouter(t *testing.T) { }() for i := 0; i < expectedCount; i++ { - msg := spectypes.SSVMessage{ - MsgType: spectypes.MsgType(i % 3), - MsgID: spectypes.NewMsgID(types.GetDefaultDomain(), []byte{1, 1, 1, 1, 1}, spectypes.BNRoleAttester), - Data: []byte(fmt.Sprintf("data-%d", i)), + msg := &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.MsgType(i % 3), + MsgID: spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte{1, 1, 1, 1, 1}, spectypes.BNRoleAttester), + Data: []byte(fmt.Sprintf("data-%d", i)), + }, } - router.Route(logger, msg) + + router.Route(context.TODO(), msg) if i%2 == 0 { - go router.Route(logger, msg) + go router.Route(context.TODO(), msg) } } diff --git a/operator/validator/task_executor.go b/operator/validator/task_executor.go index 0ea2191716..f3b967b5b3 100644 --- a/operator/validator/task_executor.go +++ b/operator/validator/task_executor.go @@ -1,17 +1,16 @@ package validator import ( - "encoding/hex" - "fmt" "time" + spectypes "github.com/bloxapp/ssv-spec/types" 
"github.com/ethereum/go-ethereum/common" "go.uber.org/multierr" "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/protocol/v2/types" ) func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logger { @@ -20,7 +19,7 @@ func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logge With(fields...) } -func (c *controller) StartValidator(share *ssvtypes.SSVShare) error { +func (c *controller) StartValidator(share *types.SSVShare) error { // logger := c.taskLogger("StartValidator", fields.PubKey(share.ValidatorPubKey)) // Since we don't yet have the Beacon metadata for this validator, @@ -30,41 +29,30 @@ func (c *controller) StartValidator(share *ssvtypes.SSVShare) error { return nil } -func (c *controller) StopValidator(publicKey []byte) error { - logger := c.taskLogger("StopValidator", fields.PubKey(publicKey)) +func (c *controller) StopValidator(pubKey spectypes.ValidatorPK) error { + logger := c.taskLogger("StopValidator", fields.PubKey(pubKey)) - c.metrics.ValidatorRemoved(publicKey) - if err := c.onShareRemove(hex.EncodeToString(publicKey), true); err != nil { - return err - } + c.metrics.ValidatorRemoved(pubKey) + c.onShareStop(pubKey) logger.Info("removed validator") return nil } -func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error { - logger := c.taskLogger("LiquidateCluster", - zap.String("owner", owner.String()), - zap.Uint64s("operator_ids", operatorIDs)) +func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error { + logger := c.taskLogger("LiquidateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs)) for _, share := range toLiquidate { - // we can't remove the share secret from key-manager - // due to the fact that after activating 
the validators (ClusterReactivated) - // we don't have the encrypted keys to decrypt the secret, but only the owner address - if err := c.onShareRemove(hex.EncodeToString(share.ValidatorPubKey), false); err != nil { - return err - } - logger.With(fields.PubKey(share.ValidatorPubKey)).Debug("removed share") + c.onShareStop(share.ValidatorPubKey) + logger.With(fields.PubKey(share.ValidatorPubKey)).Debug("liquidated share") } return nil } -func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error { - logger := c.taskLogger("ReactivateCluster", - zap.String("owner", owner.String()), - zap.Uint64s("operator_ids", operatorIDs)) +func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error { + logger := c.taskLogger("ReactivateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs)) var startedValidators int var errs error @@ -100,17 +88,14 @@ func (c *controller) UpdateFeeRecipient(owner, recipient common.Address) error { zap.String("owner", owner.String()), zap.String("fee_recipient", recipient.String())) - err := c.validatorsMap.ForEach(func(v *validator.Validator) error { + c.validatorsMap.ForEach(func(v *validator.Validator) bool { if v.Share.OwnerAddress == owner { v.Share.FeeRecipientAddress = recipient logger.Debug("updated recipient address") } - return nil + return true }) - if err != nil { - return fmt.Errorf("update validators map: %w", err) - } return nil } diff --git a/operator/validator/validators_map.go b/operator/validator/validators_map.go deleted file mode 100644 index 02d351f39c..0000000000 --- a/operator/validator/validators_map.go +++ /dev/null @@ -1,126 +0,0 @@ -package validator - -// TODO(nkryuchkov): remove old validator interface(s) -import ( - "context" - "encoding/hex" - "fmt" - "sync" - - "github.com/bloxapp/ssv/logging/fields" - - "go.uber.org/zap" - - 
"github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/types" -) - -// validatorIterator is the function used to iterate over existing validators -type validatorIterator func(validator *validator.Validator) error - -// validatorsMap manages a collection of running validators -type validatorsMap struct { - ctx context.Context - - optsTemplate *validator.Options - - lock sync.RWMutex - validatorsMap map[string]*validator.Validator -} - -func newValidatorsMap(ctx context.Context, optsTemplate *validator.Options) *validatorsMap { - vm := validatorsMap{ - ctx: ctx, - lock: sync.RWMutex{}, - validatorsMap: make(map[string]*validator.Validator), - optsTemplate: optsTemplate, - } - - return &vm -} - -// ForEach loops over validators -func (vm *validatorsMap) ForEach(iterator validatorIterator) error { - vm.lock.RLock() - defer vm.lock.RUnlock() - - for _, val := range vm.validatorsMap { - if err := iterator(val); err != nil { - return err - } - } - return nil -} - -// GetValidator returns a validator -func (vm *validatorsMap) GetValidator(pubKey string) (*validator.Validator, bool) { - // main lock - vm.lock.RLock() - defer vm.lock.RUnlock() - - v, ok := vm.validatorsMap[pubKey] - - return v, ok -} - -// GetOrCreateValidator creates a new validator instance if not exist -func (vm *validatorsMap) GetOrCreateValidator(logger *zap.Logger, share *types.SSVShare) (*validator.Validator, error) { - // main lock - vm.lock.Lock() - defer vm.lock.Unlock() - - pubKey := hex.EncodeToString(share.ValidatorPubKey) - if v, ok := vm.validatorsMap[pubKey]; !ok { - if !share.HasBeaconMetadata() { - return nil, fmt.Errorf("beacon metadata is missing") - } - opts := *vm.optsTemplate - opts.SSVShare = share - - // Share context with both the validator and the runners, - // so that when the validator is stopped, the runners are stopped as well. 
- ctx, cancel := context.WithCancel(vm.ctx) - opts.DutyRunners = SetupRunners(ctx, logger, opts) - vm.validatorsMap[pubKey] = validator.NewValidator(ctx, cancel, opts) - - printShare(share, logger, "setup validator done") - opts.SSVShare = nil - } else { - printShare(v.Share, logger, "get validator") - } - - return vm.validatorsMap[pubKey], nil -} - -// RemoveValidator removes a validator instance from the map -func (vm *validatorsMap) RemoveValidator(pubKey string) *validator.Validator { - if v, found := vm.GetValidator(pubKey); found { - vm.lock.Lock() - defer vm.lock.Unlock() - - delete(vm.validatorsMap, pubKey) - return v - } - return nil -} - -// Size returns the number of validators in the map -func (vm *validatorsMap) Size() int { - vm.lock.RLock() - defer vm.lock.RUnlock() - - return len(vm.validatorsMap) -} - -func printShare(s *types.SSVShare, logger *zap.Logger, msg string) { - committee := make([]string, len(s.Committee)) - for i, c := range s.Committee { - committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.OperatorID, c.PubKey) - } - logger.Debug(msg, - fields.PubKey(s.ValidatorPubKey), - zap.Uint64("node_id", s.OperatorID), - zap.Strings("committee", committee), - fields.FeeRecipient(s.FeeRecipientAddress[:]), - ) -} diff --git a/operator/validatorsmap/validators_map.go b/operator/validatorsmap/validators_map.go new file mode 100644 index 0000000000..badc404b1c --- /dev/null +++ b/operator/validatorsmap/validators_map.go @@ -0,0 +1,110 @@ +package validatorsmap + +// TODO(nkryuchkov): remove old validator interface(s) +import ( + "context" + "sync" + + "github.com/bloxapp/ssv/protocol/v2/ssv/validator" +) + +// validatorIterator is the function used to iterate over existing validators +type validatorIterator func(validator *validator.Validator) bool + +// ValidatorsMap manages a collection of running validators +type ValidatorsMap struct { + ctx context.Context + lock sync.RWMutex + validatorsMap map[string]*validator.Validator +} + +func 
New(ctx context.Context, opts ...Option) *ValidatorsMap { + vm := &ValidatorsMap{ + ctx: ctx, + lock: sync.RWMutex{}, + validatorsMap: make(map[string]*validator.Validator), + } + + for _, opt := range opts { + opt(vm) + } + + return vm +} + +// Option defines EventSyncer configuration option. +type Option func(*ValidatorsMap) + +// WithInitialState sets initial state +func WithInitialState(state map[string]*validator.Validator) Option { + return func(vm *ValidatorsMap) { + vm.validatorsMap = state + } +} + +// ForEach loops over validators +func (vm *ValidatorsMap) ForEach(iterator validatorIterator) bool { + vm.lock.RLock() + defer vm.lock.RUnlock() + + for _, val := range vm.validatorsMap { + if !iterator(val) { + return false + } + } + return true +} + +// GetAll returns all validators. +func (vm *ValidatorsMap) GetAll() []*validator.Validator { + vm.lock.RLock() + defer vm.lock.RUnlock() + + var validators []*validator.Validator + for _, val := range vm.validatorsMap { + validators = append(validators, val) + } + + return validators +} + +// GetValidator returns a validator +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) GetValidator(pubKey string) (*validator.Validator, bool) { + vm.lock.RLock() + defer vm.lock.RUnlock() + + v, ok := vm.validatorsMap[pubKey] + + return v, ok +} + +// CreateValidator creates a new validator instance +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) CreateValidator(pubKey string, v *validator.Validator) { + vm.lock.Lock() + defer vm.lock.Unlock() + + vm.validatorsMap[pubKey] = v +} + +// RemoveValidator removes a validator instance from the map +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) RemoveValidator(pubKey string) *validator.Validator { + if v, found := vm.GetValidator(pubKey); found { + vm.lock.Lock() + defer vm.lock.Unlock() + + delete(vm.validatorsMap, pubKey) + return v + } + return nil +} + +// Size returns the number 
of validators in the map +func (vm *ValidatorsMap) Size() int { + vm.lock.RLock() + defer vm.lock.RUnlock() + + return len(vm.validatorsMap) +} diff --git a/protocol/v2/blockchain/beacon/mock_client.go b/protocol/v2/blockchain/beacon/mock_client.go index 7360109bd1..2c8fa64f4d 100644 --- a/protocol/v2/blockchain/beacon/mock_client.go +++ b/protocol/v2/blockchain/beacon/mock_client.go @@ -643,6 +643,20 @@ func (mr *MockBeaconNodeMockRecorder) SubmitValidatorRegistration(pubkey, feeRec return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitValidatorRegistration", reflect.TypeOf((*MockBeaconNode)(nil).SubmitValidatorRegistration), pubkey, feeRecipient, sig) } +// SubmitVoluntaryExit mocks base method. +func (m *MockBeaconNode) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit, sig phase0.BLSSignature) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitVoluntaryExit", voluntaryExit, sig) + ret0, _ := ret[0].(error) + return ret0 +} + +// SubmitVoluntaryExit indicates an expected call of SubmitVoluntaryExit. +func (mr *MockBeaconNodeMockRecorder) SubmitVoluntaryExit(voluntaryExit, sig interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitVoluntaryExit", reflect.TypeOf((*MockBeaconNode)(nil).SubmitVoluntaryExit), voluntaryExit, sig) +} + // SyncCommitteeDuties mocks base method. 
func (m *MockBeaconNode) SyncCommitteeDuties(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.SyncCommitteeDuty, error) { m.ctrl.T.Helper() diff --git a/protocol/v2/blockchain/beacon/mocks/network.go b/protocol/v2/blockchain/beacon/mocks/network.go index 0a129035f2..65c124cbf1 100644 --- a/protocol/v2/blockchain/beacon/mocks/network.go +++ b/protocol/v2/blockchain/beacon/mocks/network.go @@ -233,6 +233,20 @@ func (mr *MockBeaconNetworkMockRecorder) GetNetwork() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetwork", reflect.TypeOf((*MockBeaconNetwork)(nil).GetNetwork)) } +// GetSlotEndTime mocks base method. +func (m *MockBeaconNetwork) GetSlotEndTime(slot phase0.Slot) time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSlotEndTime", slot) + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetSlotEndTime indicates an expected call of GetSlotEndTime. +func (mr *MockBeaconNetworkMockRecorder) GetSlotEndTime(slot interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSlotEndTime", reflect.TypeOf((*MockBeaconNetwork)(nil).GetSlotEndTime), slot) +} + // GetSlotStartTime mocks base method. 
func (m *MockBeaconNetwork) GetSlotStartTime(slot phase0.Slot) time.Time { m.ctrl.T.Helper() diff --git a/protocol/v2/blockchain/beacon/network.go b/protocol/v2/blockchain/beacon/network.go index e9f0c240c4..965890540f 100644 --- a/protocol/v2/blockchain/beacon/network.go +++ b/protocol/v2/blockchain/beacon/network.go @@ -29,6 +29,7 @@ type BeaconNetwork interface { EpochStartTime(epoch phase0.Epoch) time.Time GetSlotStartTime(slot phase0.Slot) time.Time + GetSlotEndTime(slot phase0.Slot) time.Time IsFirstSlotOfEpoch(slot phase0.Slot) bool GetEpochFirstSlot(epoch phase0.Epoch) phase0.Slot @@ -82,6 +83,11 @@ func (n Network) GetSlotStartTime(slot phase0.Slot) time.Time { return start } +// GetSlotEndTime returns the end time for the given slot +func (n Network) GetSlotEndTime(slot phase0.Slot) time.Time { + return n.GetSlotStartTime(slot + 1) +} + // EstimatedCurrentSlot returns the estimation of the current slot func (n Network) EstimatedCurrentSlot() phase0.Slot { return n.EstimatedSlotAtTime(time.Now().Unix()) diff --git a/protocol/v2/blockchain/beacon/network_test.go b/protocol/v2/blockchain/beacon/network_test.go new file mode 100644 index 0000000000..a5646bf36a --- /dev/null +++ b/protocol/v2/blockchain/beacon/network_test.go @@ -0,0 +1,19 @@ +package beacon + +import ( + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/stretchr/testify/require" +) + +func TestNetwork_GetSlotEndTime(t *testing.T) { + slot := phase0.Slot(1) + + n := NewNetwork(spectypes.PraterNetwork) + slotStart := n.GetSlotStartTime(slot) + slotEnd := n.GetSlotEndTime(slot) + + require.Equal(t, n.SlotDurationSec(), slotEnd.Sub(slotStart)) +} diff --git a/protocol/v2/p2p/network.go b/protocol/v2/p2p/network.go index 8e9f99a78d..bd201dddda 100644 --- a/protocol/v2/p2p/network.go +++ b/protocol/v2/p2p/network.go @@ -132,21 +132,6 @@ func WithHandler(protocol SyncProtocol, handler RequestHandler) *SyncHandler { } } -// 
Syncer holds the interface for syncing data from other peers -type Syncer interface { - specqbft.Syncer - // GetHistory sync the given range from a set of peers that supports history for the given identifier - // it accepts a list of targets for the request. - GetHistory(logger *zap.Logger, mid spectypes.MessageID, from, to specqbft.Height, targets ...string) ([]SyncResult, specqbft.Height, error) - - // RegisterHandlers registers handler for the given protocol - RegisterHandlers(logger *zap.Logger, handlers ...*SyncHandler) - - // LastDecided fetches last decided from a random set of peers - // TODO: replace with specqbft.SyncHighestDecided - LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]SyncResult, error) -} - // MsgValidationResult helps other components to report message validation with a generic results scheme type MsgValidationResult int32 @@ -173,6 +158,8 @@ type ValidationReporting interface { type Network interface { Subscriber Broadcaster - Syncer ValidationReporting + + // RegisterHandlers registers handler for the given protocol + RegisterHandlers(logger *zap.Logger, handlers ...*SyncHandler) } diff --git a/protocol/v2/qbft/config.go b/protocol/v2/qbft/config.go index 580b3b03e2..21aae3df6b 100644 --- a/protocol/v2/qbft/config.go +++ b/protocol/v2/qbft/config.go @@ -3,6 +3,8 @@ package qbft import ( specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) @@ -24,18 +26,21 @@ type IConfig interface { // GetStorage returns a storage instance GetStorage() qbftstorage.QBFTStore // GetTimer returns round timer - GetTimer() specqbft.Timer + GetTimer() roundtimer.Timer + // VerifySignatures returns if signature is checked + VerifySignatures() bool } type Config struct { - Signer spectypes.SSVSigner - SigningPK []byte - Domain spectypes.DomainType - ValueCheckF specqbft.ProposedValueCheckF - 
ProposerF specqbft.ProposerF - Storage qbftstorage.QBFTStore - Network specqbft.Network - Timer specqbft.Timer + Signer spectypes.SSVSigner + SigningPK []byte + Domain spectypes.DomainType + ValueCheckF specqbft.ProposedValueCheckF + ProposerF specqbft.ProposerF + Storage qbftstorage.QBFTStore + Network specqbft.Network + Timer roundtimer.Timer + SignatureVerification bool } // GetSigner returns a Signer instance @@ -74,6 +79,10 @@ func (c *Config) GetStorage() qbftstorage.QBFTStore { } // GetTimer returns round timer -func (c *Config) GetTimer() specqbft.Timer { +func (c *Config) GetTimer() roundtimer.Timer { return c.Timer } + +func (c *Config) VerifySignatures() bool { + return c.SignatureVerification +} diff --git a/protocol/v2/qbft/controller/controller.go b/protocol/v2/qbft/controller/controller.go index 84abc6600f..dd786dc993 100644 --- a/protocol/v2/qbft/controller/controller.go +++ b/protocol/v2/qbft/controller/controller.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" "encoding/json" + "fmt" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" @@ -24,14 +25,12 @@ type Controller struct { Identifier []byte Height specqbft.Height // incremental Height for InstanceContainer // StoredInstances stores the last HistoricalInstanceCapacity in an array for message processing purposes. 
- StoredInstances InstanceContainer - // FutureMsgsContainer holds all msgs from a higher height - FutureMsgsContainer map[spectypes.OperatorID]specqbft.Height // maps msg signer to height of higher height received msgs - Domain spectypes.DomainType - Share *spectypes.Share - NewDecidedHandler NewDecidedHandler `json:"-"` - config qbft.IConfig - fullNode bool + StoredInstances InstanceContainer + Domain spectypes.DomainType + Share *spectypes.Share + NewDecidedHandler NewDecidedHandler `json:"-"` + config qbft.IConfig + fullNode bool } func NewController( @@ -42,14 +41,13 @@ func NewController( fullNode bool, ) *Controller { return &Controller{ - Identifier: identifier, - Height: specqbft.FirstHeight, - Domain: domain, - Share: share, - StoredInstances: make(InstanceContainer, 0, InstanceContainerDefaultCapacity), - FutureMsgsContainer: make(map[spectypes.OperatorID]specqbft.Height), - config: config, - fullNode: fullNode, + Identifier: identifier, + Height: specqbft.FirstHeight, + Domain: domain, + Share: share, + StoredInstances: make(InstanceContainer, 0, InstanceContainerDefaultCapacity), + config: config, + fullNode: fullNode, } } @@ -100,10 +98,9 @@ func (c *Controller) ProcessMsg(logger *zap.Logger, msg *specqbft.SignedMessage) if IsDecidedMsg(c.Share, msg) { return c.UponDecided(logger, msg) } else if c.isFutureMessage(msg) { - return c.UponFutureMsg(logger, msg) - } else { - return c.UponExistingInstanceMsg(logger, msg) + return nil, fmt.Errorf("future msg from height, could not process") } + return c.UponExistingInstanceMsg(logger, msg) } func (c *Controller) UponExistingInstanceMsg(logger *zap.Logger, msg *specqbft.SignedMessage) (*specqbft.SignedMessage, error) { diff --git a/protocol/v2/qbft/controller/controller_test.go b/protocol/v2/qbft/controller/controller_test.go index 35c7a39d31..cd119c9d86 100644 --- a/protocol/v2/qbft/controller/controller_test.go +++ b/protocol/v2/qbft/controller/controller_test.go @@ -1,11 +1,18 @@ package controller import 
( + "encoding/json" "testing" - "github.com/bloxapp/ssv/protocol/v2/qbft" - + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/protocol/v2/qbft" + "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + "github.com/bloxapp/ssv/protocol/v2/types" ) func TestController_Marshaling(t *testing.T) { @@ -25,3 +32,60 @@ func TestController_Marshaling(t *testing.T) { require.NoError(t, err) require.EqualValues(t, byts, bytsDecoded) } + +func TestController_OnTimeoutWithRoundCheck(t *testing.T) { + // Initialize logger + logger := logging.TestLogger(t) + + testConfig := &qbft.Config{ + Signer: spectestingutils.NewTestingKeyManager(), + Network: spectestingutils.NewTestingNetwork(), + Timer: roundtimer.NewTestingTimer(), + } + + share := spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()) + inst := instance.NewInstance( + testConfig, + share, + []byte{1, 2, 3, 4}, + specqbft.FirstHeight, + ) + + // Initialize Controller + contr := &Controller{} + + // Initialize EventMsg for the test + timeoutData := types.TimeoutData{ + Height: specqbft.FirstHeight, + Round: specqbft.FirstRound, + } + + data, err := json.Marshal(timeoutData) + require.NoError(t, err) + + msg := &types.EventMsg{ + Type: types.Timeout, + Data: data, + } + + // Simulate a scenario where the instance is at a higher round + inst.State.Round = specqbft.Round(2) + contr.StoredInstances.addNewInstance(inst) + + // Call OnTimeout and capture the error + err = contr.OnTimeout(logger, *msg) + + // Assert that the error is nil and the round did not bump + require.NoError(t, err) + require.Equal(t, specqbft.Round(2), inst.State.Round, "Round should not bump") + + // Simulate a scenario where the instance is at the same or lower round + inst.State.Round = specqbft.FirstRound + + // Call 
OnTimeout and capture the error + err = contr.OnTimeout(logger, *msg) + + // Assert that the error is nil and the round did bump + require.NoError(t, err) + require.Equal(t, specqbft.Round(2), inst.State.Round, "Round should bump") +} diff --git a/protocol/v2/qbft/controller/decided.go b/protocol/v2/qbft/controller/decided.go index 6c239a5a90..f9b694bc8e 100644 --- a/protocol/v2/qbft/controller/decided.go +++ b/protocol/v2/qbft/controller/decided.go @@ -67,8 +67,6 @@ func (c *Controller) UponDecided(logger *zap.Logger, msg *specqbft.SignedMessage } if isFutureDecided { - // sync gap - c.GetConfig().GetNetwork().SyncDecidedByRange(spectypes.MessageIDFromBytes(c.Identifier), c.Height, msg.Message.Height) // bump height c.Height = msg.Message.Height } diff --git a/protocol/v2/qbft/controller/future_msg.go b/protocol/v2/qbft/controller/future_msg.go deleted file mode 100644 index 30a205ff6e..0000000000 --- a/protocol/v2/qbft/controller/future_msg.go +++ /dev/null @@ -1,76 +0,0 @@ -package controller - -import ( - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/types" -) - -func (c *Controller) UponFutureMsg(logger *zap.Logger, msg *specqbft.SignedMessage) (*specqbft.SignedMessage, error) { - if err := ValidateFutureMsg(c.GetConfig(), msg, c.Share.Committee); err != nil { - return nil, errors.Wrap(err, "invalid future msg") - } - if !c.addHigherHeightMsg(msg) { - return nil, errors.New("discarded future msg") - } - if c.f1SyncTrigger() { - logger.Debug("🔀 triggered f+1 sync", - zap.Uint64("ctrl_height", uint64(c.Height)), - zap.Uint64("msg_height", uint64(msg.Message.Height))) - return nil, c.GetConfig().GetNetwork().SyncHighestDecided(spectypes.MessageIDFromBytes(c.Identifier)) - } - return nil, nil -} - -func ValidateFutureMsg( - config qbft.IConfig, - msg *specqbft.SignedMessage, - operators 
[]*spectypes.Operator, -) error { - if err := msg.Validate(); err != nil { - return errors.Wrap(err, "invalid decided msg") - } - - if len(msg.GetSigners()) != 1 { - return errors.New("allows 1 signer") - } - - // verify signature - if err := types.VerifyByOperators(msg.Signature, msg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") - } - - return nil -} - -// addHigherHeightMsg verifies msg, cleanup queue and adds the message if unique signer -func (c *Controller) addHigherHeightMsg(msg *specqbft.SignedMessage) bool { - // cleanup lower height msgs - cleanedQueue := make(map[spectypes.OperatorID]specqbft.Height) - signerExists := false - for signer, height := range c.FutureMsgsContainer { - if height <= c.Height { - continue - } - - if signer == msg.GetSigners()[0] { - signerExists = true - } - cleanedQueue[signer] = height - } - - if !signerExists { - cleanedQueue[msg.GetSigners()[0]] = msg.Message.Height - } - c.FutureMsgsContainer = cleanedQueue - return !signerExists -} - -// f1SyncTrigger returns true if received f+1 higher height messages from unique signers -func (c *Controller) f1SyncTrigger() bool { - return c.Share.HasPartialQuorum(len(c.FutureMsgsContainer)) -} diff --git a/protocol/v2/qbft/controller/timer.go b/protocol/v2/qbft/controller/timer.go index f073fa813c..fa3ff1e4db 100644 --- a/protocol/v2/qbft/controller/timer.go +++ b/protocol/v2/qbft/controller/timer.go @@ -19,8 +19,13 @@ func (c *Controller) OnTimeout(logger *zap.Logger, msg types.EventMsg) error { if instance == nil { return errors.New("instance is nil") } - decided, _ := instance.IsDecided() - if decided { + + if timeoutData.Round < instance.State.Round { + logger.Debug("timeout for old round", zap.Uint64("timeout round", uint64(timeoutData.Round)), zap.Uint64("instance round", uint64(instance.State.Round))) + return nil + } + + if decided, _ := instance.IsDecided(); decided { return nil } return 
instance.UponRoundTimeout(logger) diff --git a/protocol/v2/qbft/instance/commit.go b/protocol/v2/qbft/instance/commit.go index 5620602ea6..53d4f5855e 100644 --- a/protocol/v2/qbft/instance/commit.go +++ b/protocol/v2/qbft/instance/commit.go @@ -158,9 +158,10 @@ func BaseCommitValidation( return errors.Wrap(err, "signed commit invalid") } - // verify signature - if err := types.VerifyByOperators(signedCommit.Signature, signedCommit, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedCommit.Signature, signedCommit, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/instance.go b/protocol/v2/qbft/instance/instance.go index 2513268e25..f0d99e92cd 100644 --- a/protocol/v2/qbft/instance/instance.go +++ b/protocol/v2/qbft/instance/instance.go @@ -66,7 +66,7 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh i.State.Height = height i.metrics.StartStage() - i.config.GetTimer().TimeoutForRound(specqbft.FirstRound) + i.config.GetTimer().TimeoutForRound(height, specqbft.FirstRound) logger = logger.With( fields.Round(i.State.Round), @@ -95,13 +95,9 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh } func (i *Instance) Broadcast(logger *zap.Logger, msg *specqbft.SignedMessage) error { - // logger.Debug("Broadcast", - // zap.Any("MsgType", msg.Message.MsgType), - // fields.Round(msg.Message.Round), - // zap.Any("DataRound", msg.Message.DataRound), - // fields.Height(msg.Message.Height), - // ) - + if !i.CanProcessMessages() { + return errors.New("instance stopped processing messages") + } byts, err := msg.Encode() if err != nil { return errors.Wrap(err, "could not encode message") diff --git 
a/protocol/v2/qbft/instance/marshalutils.go b/protocol/v2/qbft/instance/marshalutils.go new file mode 100644 index 0000000000..ba76e75453 --- /dev/null +++ b/protocol/v2/qbft/instance/marshalutils.go @@ -0,0 +1,47 @@ +package instance + +import "encoding/json" + +/////////////////////// JSON Marshalling for Tests /////////////////////// + +// region: JSON Marshalling for Instance + +// MarshalJSON is a custom JSON marshaller for Instance +func (i *Instance) MarshalJSON() ([]byte, error) { + type Alias Instance + if i.forceStop { + return json.Marshal(&struct { + ForceStop bool `json:"forceStop"` + *Alias + }{ + ForceStop: i.forceStop, + Alias: (*Alias)(i), + }) + } else { + return json.Marshal(&struct { + *Alias + }{ + Alias: (*Alias)(i), + }) + } +} + +// UnmarshalJSON is a custom JSON unmarshaller for Instance +func (i *Instance) UnmarshalJSON(data []byte) error { + type Alias Instance + aux := &struct { + ForceStop *bool `json:"forceStop,omitempty"` + *Alias + }{ + Alias: (*Alias)(i), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ForceStop != nil { + i.forceStop = *aux.ForceStop + } + return nil +} + +// endregion: JSON Marshalling for Instance diff --git a/protocol/v2/qbft/instance/metrics.go b/protocol/v2/qbft/instance/metrics.go index e2598671ad..246fbad291 100644 --- a/protocol/v2/qbft/instance/metrics.go +++ b/protocol/v2/qbft/instance/metrics.go @@ -1,14 +1,13 @@ package instance import ( - "encoding/hex" - "go.uber.org/zap" "time" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/zap" ) var ( @@ -16,11 +15,11 @@ var ( Name: "ssv_validator_instance_stage_duration_seconds", Help: "Instance stage duration (seconds)", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 1.5, 2, 5}, - }, []string{"stage", "pubKey"}) + }, []string{"stage"}) metricsRound = 
promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "ssv_qbft_instance_round", Help: "QBFT instance round", - }, []string{"roleType", "pubKey"}) + }, []string{"roleType"}) ) func init() { @@ -45,12 +44,11 @@ type metrics struct { } func newMetrics(msgID spectypes.MessageID) *metrics { - hexPubKey := hex.EncodeToString(msgID.GetPubKey()) return &metrics{ - proposalDuration: metricsStageDuration.WithLabelValues("proposal", hexPubKey), - prepareDuration: metricsStageDuration.WithLabelValues("prepare", hexPubKey), - commitDuration: metricsStageDuration.WithLabelValues("commit", hexPubKey), - round: metricsRound.WithLabelValues("validator", hexPubKey), + proposalDuration: metricsStageDuration.WithLabelValues("proposal"), + prepareDuration: metricsStageDuration.WithLabelValues("prepare"), + commitDuration: metricsStageDuration.WithLabelValues("commit"), + round: metricsRound.WithLabelValues(msgID.GetRoleType().String()), } } diff --git a/protocol/v2/qbft/instance/prepare.go b/protocol/v2/qbft/instance/prepare.go index 7714771b88..55748b33c2 100644 --- a/protocol/v2/qbft/instance/prepare.go +++ b/protocol/v2/qbft/instance/prepare.go @@ -159,8 +159,10 @@ func validSignedPrepareForHeightRoundAndRoot( return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedPrepare.Signature, signedPrepare, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedPrepare.Signature, signedPrepare, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/proposal.go b/protocol/v2/qbft/instance/proposal.go index a417c04fc4..a4b5303ada 100644 --- a/protocol/v2/qbft/instance/proposal.go +++ b/protocol/v2/qbft/instance/proposal.go @@ -10,7 +10,7 @@ import ( 
"github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/types" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" ) // uponProposal process proposal message @@ -33,7 +33,7 @@ func (i *Instance) uponProposal(logger *zap.Logger, signedProposal *specqbft.Sig // A future justified proposal should bump us into future round and reset timer if signedProposal.Message.Round > i.State.Round { - i.config.GetTimer().TimeoutForRound(signedProposal.Message.Round) + i.config.GetTimer().TimeoutForRound(signedProposal.Message.Height, signedProposal.Message.Round) } i.bumpToRound(newRound) @@ -77,8 +77,10 @@ func isValidProposal( if len(signedProposal.GetSigners()) != 1 { return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedProposal.Signature, signedProposal, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := ssvtypes.VerifyByOperators(signedProposal.Signature, signedProposal, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } if !signedProposal.MatchedSigners([]spectypes.OperatorID{proposer(state, config, signedProposal.Message.Round)}) { return errors.New("proposal leader invalid") @@ -121,6 +123,30 @@ func isValidProposal( return errors.New("proposal is not valid with current state") } +func IsProposalJustification( + config qbft.IConfig, + share *ssvtypes.SSVShare, + roundChangeMsgs []*specqbft.SignedMessage, + prepareMsgs []*specqbft.SignedMessage, + height specqbft.Height, + round specqbft.Round, + fullData []byte, +) error { + return isProposalJustification( + &specqbft.State{ + Share: &share.Share, + Height: height, + }, + config, + roundChangeMsgs, + prepareMsgs, + height, + round, + fullData, + func(data []byte) error { return nil }, + ) +} + // 
isProposalJustification returns nil if the proposal and round change messages are valid and justify a proposal message for the provided round, value and leader func isProposalJustification( state *specqbft.State, @@ -256,7 +282,7 @@ func CreateProposal(state *specqbft.State, config qbft.IConfig, fullData []byte, } sig, err := config.GetSigner().SignRoot(msg, spectypes.QBFTSignatureType, state.Share.SharePubKey) if err != nil { - return nil, errors.Wrap(err, "failed signing prepare msg") + return nil, errors.Wrap(err, "failed signing proposal msg") } signedMsg := &specqbft.SignedMessage{ diff --git a/protocol/v2/qbft/instance/round_change.go b/protocol/v2/qbft/instance/round_change.go index 5b0de2e3c9..00cd676b3d 100644 --- a/protocol/v2/qbft/instance/round_change.go +++ b/protocol/v2/qbft/instance/round_change.go @@ -30,8 +30,11 @@ func (i *Instance) uponRoundChange( return nil // UponCommit was already called } - logger = logger.With(fields.Round(i.State.Round), - fields.Height(i.State.Height)) + logger = logger.With( + fields.Round(i.State.Round), + fields.Height(i.State.Height), + zap.Uint64("msg_round", uint64(signedRoundChange.Message.Round)), + ) logger.Debug("🔄 got round change", fields.Root(signedRoundChange.Message.Root), @@ -85,7 +88,9 @@ func (i *Instance) uponRoundChange( func (i *Instance) uponChangeRoundPartialQuorum(logger *zap.Logger, newRound specqbft.Round, instanceStartValue []byte) error { i.bumpToRound(newRound) i.State.ProposalAcceptedForCurrentRound = nil - i.config.GetTimer().TimeoutForRound(i.State.Round) + + i.config.GetTimer().TimeoutForRound(i.State.Height, i.State.Round) + roundChange, err := CreateRoundChange(i.State, i.config, newRound, instanceStartValue) if err != nil { return errors.Wrap(err, "failed to create round change message") @@ -247,8 +252,10 @@ func validRoundChangeForData( return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedMsg.Signature, signedMsg, config.GetSignatureDomainType(), 
spectypes.QBFTSignatureType, state.Share.Committee); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedMsg.Signature, signedMsg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, state.Share.Committee); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } if err := signedMsg.Message.Validate(); err != nil { @@ -377,7 +384,7 @@ func CreateRoundChange(state *specqbft.State, config qbft.IConfig, newRound spec } sig, err := config.GetSigner().SignRoot(msg, spectypes.QBFTSignatureType, state.Share.SharePubKey) if err != nil { - return nil, errors.Wrap(err, "failed signing prepare msg") + return nil, errors.Wrap(err, "failed signing round change msg") } signedMsg := &specqbft.SignedMessage{ diff --git a/protocol/v2/qbft/instance/timeout.go b/protocol/v2/qbft/instance/timeout.go index ee8e9248b7..62ae4c784c 100644 --- a/protocol/v2/qbft/instance/timeout.go +++ b/protocol/v2/qbft/instance/timeout.go @@ -1,9 +1,10 @@ package instance import ( - "github.com/bloxapp/ssv/logging/fields" "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/logging/fields" ) var CutoffRound = 15 // stop processing instances after 8*2+120*6 = 14.2 min (~ 2 epochs) @@ -22,7 +23,7 @@ func (i *Instance) UponRoundTimeout(logger *zap.Logger) error { defer func() { i.bumpToRound(newRound) i.State.ProposalAcceptedForCurrentRound = nil - i.config.GetTimer().TimeoutForRound(i.State.Round) + i.config.GetTimer().TimeoutForRound(i.State.Height, i.State.Round) }() roundChange, err := CreateRoundChange(i.State, i.config, newRound, i.StartValue) diff --git a/protocol/v2/qbft/roundtimer/mocks/timer.go b/protocol/v2/qbft/roundtimer/mocks/timer.go new file mode 100644 index 0000000000..2a691f9ab6 --- /dev/null +++ b/protocol/v2/qbft/roundtimer/mocks/timer.go @@ -0,0 +1,100 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: ./timer.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + time "time" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + qbft "github.com/bloxapp/ssv-spec/qbft" + gomock "github.com/golang/mock/gomock" +) + +// MockTimer is a mock of Timer interface. +type MockTimer struct { + ctrl *gomock.Controller + recorder *MockTimerMockRecorder +} + +// MockTimerMockRecorder is the mock recorder for MockTimer. +type MockTimerMockRecorder struct { + mock *MockTimer +} + +// NewMockTimer creates a new mock instance. +func NewMockTimer(ctrl *gomock.Controller) *MockTimer { + mock := &MockTimer{ctrl: ctrl} + mock.recorder = &MockTimerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTimer) EXPECT() *MockTimerMockRecorder { + return m.recorder +} + +// TimeoutForRound mocks base method. +func (m *MockTimer) TimeoutForRound(height qbft.Height, round qbft.Round) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "TimeoutForRound", height, round) +} + +// TimeoutForRound indicates an expected call of TimeoutForRound. +func (mr *MockTimerMockRecorder) TimeoutForRound(height, round interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeoutForRound", reflect.TypeOf((*MockTimer)(nil).TimeoutForRound), height, round) +} + +// MockBeaconNetwork is a mock of BeaconNetwork interface. +type MockBeaconNetwork struct { + ctrl *gomock.Controller + recorder *MockBeaconNetworkMockRecorder +} + +// MockBeaconNetworkMockRecorder is the mock recorder for MockBeaconNetwork. +type MockBeaconNetworkMockRecorder struct { + mock *MockBeaconNetwork +} + +// NewMockBeaconNetwork creates a new mock instance. 
+func NewMockBeaconNetwork(ctrl *gomock.Controller) *MockBeaconNetwork { + mock := &MockBeaconNetwork{ctrl: ctrl} + mock.recorder = &MockBeaconNetworkMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBeaconNetwork) EXPECT() *MockBeaconNetworkMockRecorder { + return m.recorder +} + +// GetSlotStartTime mocks base method. +func (m *MockBeaconNetwork) GetSlotStartTime(slot phase0.Slot) time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSlotStartTime", slot) + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetSlotStartTime indicates an expected call of GetSlotStartTime. +func (mr *MockBeaconNetworkMockRecorder) GetSlotStartTime(slot interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSlotStartTime", reflect.TypeOf((*MockBeaconNetwork)(nil).GetSlotStartTime), slot) +} + +// SlotDurationSec mocks base method. +func (m *MockBeaconNetwork) SlotDurationSec() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SlotDurationSec") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// SlotDurationSec indicates an expected call of SlotDurationSec. 
+func (mr *MockBeaconNetworkMockRecorder) SlotDurationSec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SlotDurationSec", reflect.TypeOf((*MockBeaconNetwork)(nil).SlotDurationSec)) +} diff --git a/protocol/v2/qbft/roundtimer/testing_timer.go b/protocol/v2/qbft/roundtimer/testing_timer.go new file mode 100644 index 0000000000..310a072aa3 --- /dev/null +++ b/protocol/v2/qbft/roundtimer/testing_timer.go @@ -0,0 +1,23 @@ +package roundtimer + +import specqbft "github.com/bloxapp/ssv-spec/qbft" + +type TimerState struct { + Timeouts int + Round specqbft.Round +} + +type TestQBFTTimer struct { + State TimerState +} + +func NewTestingTimer() Timer { + return &TestQBFTTimer{ + State: TimerState{}, + } +} + +func (t *TestQBFTTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { + t.State.Timeouts++ + t.State.Round = round +} diff --git a/protocol/v2/qbft/roundtimer/timer.go b/protocol/v2/qbft/roundtimer/timer.go index df0463e695..fde166f3dc 100644 --- a/protocol/v2/qbft/roundtimer/timer.go +++ b/protocol/v2/qbft/roundtimer/timer.go @@ -6,25 +6,36 @@ import ( "sync/atomic" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" ) -type RoundTimeoutFunc func(specqbft.Round) time.Duration +//go:generate mockgen -package=mocks -destination=./mocks/timer.go -source=./timer.go -var ( - quickTimeoutThreshold = specqbft.Round(8) - quickTimeout = 2 * time.Second - slowTimeout = 2 * time.Minute +type OnRoundTimeoutF func(round specqbft.Round) + +const ( + QuickTimeoutThreshold = specqbft.Round(8) + QuickTimeout = 2 * time.Second + SlowTimeout = 2 * time.Minute ) -// RoundTimeout returns the number of seconds until next timeout for a give round. 
-// if the round is smaller than 8 -> 2s; otherwise -> 2m -// see SIP https://github.com/bloxapp/SIPs/pull/22 -func RoundTimeout(r specqbft.Round) time.Duration { - if r <= quickTimeoutThreshold { - return quickTimeout - } - return slowTimeout +// Timer is an interface for a round timer, calling the UponRoundTimeout when times out +type Timer interface { + // TimeoutForRound will reset running timer if exists and will start a new timer for a specific round + TimeoutForRound(height specqbft.Height, round specqbft.Round) +} + +type BeaconNetwork interface { + GetSlotStartTime(slot phase0.Slot) time.Time + SlotDurationSec() time.Duration +} + +type TimeoutOptions struct { + quickThreshold specqbft.Round + quick time.Duration + slow time.Duration } // RoundTimer helps to manage current instance rounds. @@ -36,28 +47,98 @@ type RoundTimer struct { // timer is the underlying time.Timer timer *time.Timer // result holds the result of the timer - done func() + done OnRoundTimeoutF // round is the current round of the timer round int64 - - roundTimeout RoundTimeoutFunc + // timeoutOptions holds the timeoutOptions for the timer + timeoutOptions TimeoutOptions + // role is the role of the instance + role spectypes.BeaconRole + // beaconNetwork is the beacon network + beaconNetwork BeaconNetwork } // New creates a new instance of RoundTimer. 
-func New(pctx context.Context, done func()) *RoundTimer { +func New(pctx context.Context, beaconNetwork BeaconNetwork, role spectypes.BeaconRole, done OnRoundTimeoutF) *RoundTimer { ctx, cancelCtx := context.WithCancel(pctx) return &RoundTimer{ - mtx: &sync.RWMutex{}, - ctx: ctx, - cancelCtx: cancelCtx, - timer: nil, - done: done, - roundTimeout: RoundTimeout, + mtx: &sync.RWMutex{}, + ctx: ctx, + cancelCtx: cancelCtx, + timer: nil, + done: done, + role: role, + beaconNetwork: beaconNetwork, + timeoutOptions: TimeoutOptions{ + quickThreshold: QuickTimeoutThreshold, + quick: QuickTimeout, + slow: SlowTimeout, + }, } } +// RoundTimeout calculates the timeout duration for a specific role, height, and round. +// +// Timeout Rules: +// - For roles BNRoleAttester and BNRoleSyncCommittee, the base timeout is 1/3 of the slot duration. +// - For roles BNRoleAggregator and BNRoleSyncCommitteeContribution, the base timeout is 2/3 of the slot duration. +// - For role BNRoleProposer, the timeout is either quickTimeout or slowTimeout, depending on the round. +// +// Additional Timeout: +// - For rounds less than or equal to quickThreshold, the additional timeout is 'quick' seconds. +// - For rounds greater than quickThreshold, the additional timeout is 'slow' seconds. +// +// SIP Reference: +// For more details, see SIP at https://github.com/bloxapp/SIPs/pull/22 +// +// TODO: Update SIP for Deterministic Round Timeout +// TODO: Decide if to make the proposer timeout deterministic +// +// Synchronization Note: +// To ensure synchronized timeouts across instances, the timeout is based on the duty start time, +// which is calculated from the slot height. The base timeout is set based on the role, +// and the additional timeout is added based on the round number. 
+func (t *RoundTimer) RoundTimeout(height specqbft.Height, round specqbft.Round) time.Duration { + // Initialize duration to zero + var baseDuration time.Duration + + // Set base duration based on role + switch t.role { + case spectypes.BNRoleAttester, spectypes.BNRoleSyncCommittee: + // third of the slot time + baseDuration = t.beaconNetwork.SlotDurationSec() / 3 + case spectypes.BNRoleAggregator, spectypes.BNRoleSyncCommitteeContribution: + // two-third of the slot time + baseDuration = t.beaconNetwork.SlotDurationSec() / 3 * 2 + default: + if round <= t.timeoutOptions.quickThreshold { + return t.timeoutOptions.quick + } + return t.timeoutOptions.slow + } + + // Calculate additional timeout based on round + var additionalTimeout time.Duration + if round <= t.timeoutOptions.quickThreshold { + additionalTimeout = time.Duration(int(round)) * t.timeoutOptions.quick + } else { + quickPortion := time.Duration(t.timeoutOptions.quickThreshold) * t.timeoutOptions.quick + slowPortion := time.Duration(int(round-t.timeoutOptions.quickThreshold)) * t.timeoutOptions.slow + additionalTimeout = quickPortion + slowPortion + } + + // Combine base duration and additional timeout + timeoutDuration := baseDuration + additionalTimeout + + // Get the start time of the duty + dutyStartTime := t.beaconNetwork.GetSlotStartTime(phase0.Slot(height)) + + // Calculate the time until the duty should start plus the timeout duration + return time.Until(dutyStartTime.Add(timeoutDuration)) +} + // OnTimeout sets a function called on timeout. -func (t *RoundTimer) OnTimeout(done func()) { +func (t *RoundTimer) OnTimeout(done OnRoundTimeoutF) { t.mtx.Lock() // write to t.done defer t.mtx.Unlock() @@ -70,9 +151,10 @@ func (t *RoundTimer) Round() specqbft.Round { } // TimeoutForRound times out for a given round. 
-func (t *RoundTimer) TimeoutForRound(round specqbft.Round) { +func (t *RoundTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { atomic.StoreInt64(&t.round, int64(round)) - timeout := t.roundTimeout(round) + timeout := t.RoundTimeout(height, round) + // preparing the underlying timer timer := t.timer if timer == nil { @@ -101,7 +183,7 @@ func (t *RoundTimer) waitForRound(round specqbft.Round, timeout <-chan time.Time t.mtx.RLock() // read t.done defer t.mtx.RUnlock() if done := t.done; done != nil { - done() + done(round) } }() } diff --git a/protocol/v2/qbft/roundtimer/timer_test.go b/protocol/v2/qbft/roundtimer/timer_test.go index 8c41410db1..25ce776631 100644 --- a/protocol/v2/qbft/roundtimer/timer_test.go +++ b/protocol/v2/qbft/roundtimer/timer_test.go @@ -2,45 +2,167 @@ package roundtimer import ( "context" + "fmt" + "sync" "sync/atomic" "testing" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer/mocks" ) -func TestRoundTimer_TimeoutForRound(t *testing.T) { - t.Run("TimeoutForRound", func(t *testing.T) { - count := int32(0) - onTimeout := func() { - atomic.AddInt32(&count, 1) - } - timer := New(context.Background(), onTimeout) - timer.roundTimeout = func(round specqbft.Round) time.Duration { - return 1100 * time.Millisecond - } - timer.TimeoutForRound(specqbft.Round(1)) - require.Equal(t, int32(0), atomic.LoadInt32(&count)) - <-time.After(timer.roundTimeout(specqbft.Round(1)) + time.Millisecond*10) - require.Equal(t, int32(1), atomic.LoadInt32(&count)) - }) - - t.Run("timeout round before elapsed", func(t *testing.T) { - count := int32(0) - onTimeout := func() { - atomic.AddInt32(&count, 1) - } - timer := New(context.Background(), onTimeout) - timer.roundTimeout = func(round specqbft.Round) time.Duration { - return 
1100 * time.Millisecond +func TestTimeoutForRound(t *testing.T) { + roles := []spectypes.BeaconRole{ + spectypes.BNRoleAttester, + spectypes.BNRoleAggregator, + spectypes.BNRoleProposer, + spectypes.BNRoleSyncCommittee, + spectypes.BNRoleSyncCommitteeContribution, + } + + for _, role := range roles { + t.Run(fmt.Sprintf("TimeoutForRound - %s: <= quickTimeoutThreshold", role), func(t *testing.T) { + testTimeoutForRound(t, role, specqbft.Round(1)) + }) + + t.Run(fmt.Sprintf("TimeoutForRound - %s: > quickTimeoutThreshold", role), func(t *testing.T) { + testTimeoutForRound(t, role, specqbft.Round(2)) + }) + + t.Run(fmt.Sprintf("TimeoutForRound - %s: before elapsed", role), func(t *testing.T) { + testTimeoutForRoundElapsed(t, role, specqbft.Round(2)) + }) + + // TODO: Decide if to make the proposer timeout deterministic + // Proposer role is not tested for multiple synchronized timers since it's not deterministic + if role == spectypes.BNRoleProposer { + continue } - timer.TimeoutForRound(specqbft.Round(1)) - <-time.After(timer.roundTimeout(specqbft.Round(1)) / 2) - timer.TimeoutForRound(specqbft.Round(2)) // reset before elapsed - require.Equal(t, int32(0), atomic.LoadInt32(&count)) - <-time.After(timer.roundTimeout(specqbft.Round(2)) + time.Millisecond*10) - require.Equal(t, int32(1), atomic.LoadInt32(&count)) - }) + t.Run(fmt.Sprintf("TimeoutForRound - %s: multiple synchronized timers", role), func(t *testing.T) { + testTimeoutForRoundMulti(t, role, specqbft.Round(1)) + }) + } +} + +func setupMockBeaconNetwork(t *testing.T) *mocks.MockBeaconNetwork { + ctrl := gomock.NewController(t) + mockBeaconNetwork := mocks.NewMockBeaconNetwork(ctrl) + + mockBeaconNetwork.EXPECT().SlotDurationSec().Return(120 * time.Millisecond).AnyTimes() + mockBeaconNetwork.EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) time.Time { + return time.Now() + }, + ).AnyTimes() + return mockBeaconNetwork +} + +func setupTimer(mockBeaconNetwork *mocks.MockBeaconNetwork, 
onTimeout OnRoundTimeoutF, role spectypes.BeaconRole, round specqbft.Round) *RoundTimer { + timer := New(context.Background(), mockBeaconNetwork, role, onTimeout) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: round, + quick: 100 * time.Millisecond, + slow: 200 * time.Millisecond, + } + + return timer +} + +func testTimeoutForRound(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + mockBeaconNetwork := setupMockBeaconNetwork(t) + + count := int32(0) + onTimeout := func(round specqbft.Round) { + atomic.AddInt32(&count, 1) + } + + timer := setupTimer(mockBeaconNetwork, onTimeout, role, threshold) + + timer.TimeoutForRound(specqbft.FirstHeight, threshold) + require.Equal(t, int32(0), atomic.LoadInt32(&count)) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, threshold) + time.Millisecond*10) + require.Equal(t, int32(1), atomic.LoadInt32(&count)) +} + +func testTimeoutForRoundElapsed(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + mockBeaconNetwork := setupMockBeaconNetwork(t) + + count := int32(0) + onTimeout := func(round specqbft.Round) { + atomic.AddInt32(&count, 1) + } + + timer := setupTimer(mockBeaconNetwork, onTimeout, role, threshold) + + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.FirstRound) / 2) + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.Round(2)) // reset before elapsed + require.Equal(t, int32(0), atomic.LoadInt32(&count)) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.Round(2)) + time.Millisecond*10) + require.Equal(t, int32(1), atomic.LoadInt32(&count)) +} + +func testTimeoutForRoundMulti(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + ctrl := gomock.NewController(t) + mockBeaconNetwork := mocks.NewMockBeaconNetwork(ctrl) + + var count int32 + var timestamps = make([]int64, 4) + var mu sync.Mutex + + onTimeout := func(index int) { + atomic.AddInt32(&count, 
1) + mu.Lock() + timestamps[index] = time.Now().UnixNano() + mu.Unlock() + } + + timeNow := time.Now() + mockBeaconNetwork.EXPECT().SlotDurationSec().Return(100 * time.Millisecond).AnyTimes() + mockBeaconNetwork.EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) time.Time { + return timeNow + }, + ).AnyTimes() + + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + wg.Add(1) + go func(index int) { + timer := New(context.Background(), mockBeaconNetwork, role, func(round specqbft.Round) { onTimeout(index) }) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: threshold, + quick: 100 * time.Millisecond, + } + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound) + wg.Done() + }(i) + time.Sleep(time.Millisecond * 10) // Introduce a sleep between creating timers + } + + wg.Wait() // Wait for all go-routines to finish + + timer := New(context.Background(), mockBeaconNetwork, role, nil) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: specqbft.Round(1), + quick: 100 * time.Millisecond, + } + + // Wait a bit more than the expected timeout to ensure all timers have triggered + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.FirstRound) + time.Millisecond*100) + + require.Equal(t, int32(4), atomic.LoadInt32(&count), "All four timers should have triggered") + + mu.Lock() + for i := 1; i < 4; i++ { + require.InDelta(t, timestamps[0], timestamps[i], float64(time.Millisecond*10), "All four timers should expire nearly at the same time") + } + mu.Unlock() } diff --git a/protocol/v2/qbft/spectest/controller_sync_type.go b/protocol/v2/qbft/spectest/controller_sync_type.go deleted file mode 100644 index 08fc7b2332..0000000000 --- a/protocol/v2/qbft/spectest/controller_sync_type.go +++ /dev/null @@ -1,55 +0,0 @@ -package qbft - -import ( - "encoding/hex" - "testing" - - qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" - "github.com/bloxapp/ssv/protocol/v2/types" - - 
"github.com/bloxapp/ssv-spec/qbft/spectest/tests/controller/futuremsg" - spectypes "github.com/bloxapp/ssv-spec/types" - spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/logging/fields" - "github.com/stretchr/testify/require" -) - -func RunControllerSync(t *testing.T, test *futuremsg.ControllerSyncSpecTest) { - logger := logging.TestLogger(t) - identifier := spectypes.NewMsgID(types.GetDefaultDomain(), spectestingutils.TestingValidatorPubKey[:], spectypes.BNRoleAttester) - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), identifier.GetRoleType()) - contr := qbfttesting.NewTestingQBFTController( - identifier[:], - spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), - config, - false, - ) - - err := contr.StartNewInstance(logger, 0, []byte{1, 2, 3, 4}) - if err != nil { - t.Fatalf(err.Error()) - } - - var lastErr error - for _, msg := range test.InputMessages { - logger = logger.With(fields.Height(msg.Message.Height)) - _, err := contr.ProcessMsg(logger, msg) - if err != nil { - lastErr = err - } - } - - syncedDecidedCnt := config.GetNetwork().(*spectestingutils.TestingNetwork).SyncHighestDecidedCnt - require.EqualValues(t, test.SyncDecidedCalledCnt, syncedDecidedCnt) - - r, err := contr.GetRoot() - require.NoError(t, err) - require.EqualValues(t, test.ControllerPostRoot, hex.EncodeToString(r[:])) - - if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) - } else { - require.NoError(t, lastErr) - } -} diff --git a/protocol/v2/qbft/spectest/controller_type.go b/protocol/v2/qbft/spectest/controller_type.go index 0d32a545c2..a919cc104b 100644 --- a/protocol/v2/qbft/spectest/controller_type.go +++ b/protocol/v2/qbft/spectest/controller_type.go @@ -3,6 +3,10 @@ package qbft import ( "bytes" "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" "reflect" "testing" @@ -10,29 +14,32 @@ import ( spectests 
"github.com/bloxapp/ssv-spec/qbft/spectest/tests" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { + //temporary to override state comparisons from file not inputted one + overrideStateComparisonForControllerSpecTest(t, test) + logger := logging.TestLogger(t) - identifier := []byte{1, 2, 3, 4} - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.BNRoleAttester) - contr := qbfttesting.NewTestingQBFTController( - identifier[:], - spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), - config, - false, - ) + contr := generateController(logger) var lastErr error for i, runData := range test.RunInstanceData { - if err := runInstanceWithData(t, logger, specqbft.Height(i), contr, config, identifier, runData); err != nil { + height := specqbft.Height(i) + if runData.Height != nil { + height = *runData.Height + } + if err := runInstanceWithData(t, logger, height, contr, runData); err != nil { lastErr = err } } @@ -44,13 +51,24 @@ func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { } } +func generateController(logger *zap.Logger) *controller.Controller { + identifier := []byte{1, 2, 3, 4} + config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.BNRoleAttester) + return qbfttesting.NewTestingQBFTController( + identifier[:], + 
spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), + config, + false, + ) +} + func testTimer( t *testing.T, config *qbft.Config, runData *spectests.RunInstanceData, ) { if runData.ExpectedTimerState != nil { - if timer, ok := config.GetTimer().(*spectestingutils.TestQBFTTimer); ok { + if timer, ok := config.GetTimer().(*roundtimer.TestQBFTTimer); ok { require.Equal(t, runData.ExpectedTimerState.Timeouts, timer.State.Timeouts) require.Equal(t, runData.ExpectedTimerState.Round, timer.State.Round) } @@ -79,13 +97,6 @@ func testProcessMsg( } require.EqualValues(t, runData.ExpectedDecidedState.DecidedCnt, decidedCnt, lastErr) - // verify sync decided by range calls - if runData.ExpectedDecidedState.CalledSyncDecidedByRange { - require.EqualValues(t, runData.ExpectedDecidedState.DecidedByRangeValues, config.GetNetwork().(*spectestingutils.TestingNetwork).DecidedByRange) - } else { - require.EqualValues(t, [2]specqbft.Height{0, 0}, config.GetNetwork().(*spectestingutils.TestingNetwork).DecidedByRange) - } - return lastErr } @@ -129,20 +140,20 @@ func testBroadcastedDecided( } } -func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Height, contr *controller.Controller, config *qbft.Config, identifier []byte, runData *spectests.RunInstanceData) error { +func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Height, contr *controller.Controller, runData *spectests.RunInstanceData) error { err := contr.StartNewInstance(logger, height, runData.InputValue) var lastErr error if err != nil { lastErr = err } - testTimer(t, config, runData) + testTimer(t, contr.GetConfig().(*qbft.Config), runData) - if err := testProcessMsg(t, logger, contr, config, runData); err != nil { + if err := testProcessMsg(t, logger, contr, contr.GetConfig().(*qbft.Config), runData); err != nil { lastErr = err } - testBroadcastedDecided(t, config, identifier, runData) + testBroadcastedDecided(t, contr.GetConfig().(*qbft.Config), contr.Identifier, 
runData) // test root r, err := contr.GetRoot() @@ -151,3 +162,24 @@ func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Heigh return lastErr } + +func overrideStateComparisonForControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest")) + require.NoError(t, err) + specDir = filepath.Join(specDir, "generate") + dir := typescomparable.GetSCDir(specDir, reflect.TypeOf(test).String()) + path := filepath.Join(dir, fmt.Sprintf("%s.json", test.TestName())) + byteValue, err := os.ReadFile(filepath.Clean(path)) + require.NoError(t, err) + sc := make([]*controller.Controller, len(test.RunInstanceData)) + require.NoError(t, json.Unmarshal(byteValue, &sc)) + + for i, runData := range test.RunInstanceData { + runData.ControllerPostState = sc[i] + + r, err := sc[i].GetRoot() + require.NoError(t, err) + + runData.ControllerPostRoot = hex.EncodeToString(r[:]) + } +} diff --git a/protocol/v2/qbft/spectest/msg_processing_type.go b/protocol/v2/qbft/spectest/msg_processing_type.go index 63c8922862..15606c2ece 100644 --- a/protocol/v2/qbft/spectest/msg_processing_type.go +++ b/protocol/v2/qbft/spectest/msg_processing_type.go @@ -3,6 +3,8 @@ package qbft import ( "encoding/hex" "fmt" + "path/filepath" + "reflect" "testing" "time" @@ -10,15 +12,19 @@ import ( spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" "github.com/stretchr/testify/require" ) // RunMsgProcessing processes MsgProcessingSpecTest. It probably may be removed. 
func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { + overrideStateComparisonForMsgProcessingSpecTest(t, test) + // a little trick we do to instantiate all the internal instance params preByts, _ := test.Pre.Encode() msgId := specqbft.ControllerIdToMessageID(test.Pre.State.ID) @@ -49,7 +55,7 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { } if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) + require.EqualError(t, lastErr, test.ExpectedError, "expected %v, but got %v", test.ExpectedError, lastErr) } else { require.NoError(t, lastErr) } @@ -78,3 +84,22 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { require.EqualValues(t, test.PostRoot, hex.EncodeToString(postRoot[:]), "post root not valid") } + +func overrideStateComparisonForMsgProcessingSpecTest(t *testing.T, test *spectests.MsgProcessingSpecTest) { + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest")) + require.NoError(t, err) + test.PostState, err = typescomparable.UnmarshalStateComparison(specDir, test.TestName(), + reflect.TypeOf(test).String(), + &specqbft.State{}) + require.NoError(t, err) + + r, err := test.PostState.GetRoot() + require.NoError(t, err) + + // backwards compatability test, hard coded post root must be equal to the one loaded from file + if len(test.PostRoot) > 0 { + require.EqualValues(t, test.PostRoot, hex.EncodeToString(r[:])) + } + + test.PostRoot = hex.EncodeToString(r[:]) +} diff --git a/protocol/v2/qbft/spectest/qbft_mapping_test.go b/protocol/v2/qbft/spectest/qbft_mapping_test.go index d771e98d1f..00903a0adc 100644 --- a/protocol/v2/qbft/spectest/qbft_mapping_test.go +++ b/protocol/v2/qbft/spectest/qbft_mapping_test.go @@ -8,13 +8,13 @@ import ( "testing" spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" - "github.com/bloxapp/ssv-spec/qbft/spectest/tests/controller/futuremsg" 
"github.com/bloxapp/ssv-spec/qbft/spectest/tests/timeout" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/logging" testing2 "github.com/bloxapp/ssv/protocol/v2/qbft/testing" - "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" @@ -31,18 +31,12 @@ func TestQBFTMapping(t *testing.T) { panic(err.Error()) } - origDomain := types.GetDefaultDomain() types.SetDefaultDomain(testingutils.TestingSSVDomainType) - defer func() { - types.SetDefaultDomain(origDomain) - }() for name, test := range untypedTests { name, test := name, test - testName := strings.Split(name, "_")[1] testType := strings.Split(name, "_")[0] - switch testType { case reflect.TypeOf(&spectests.MsgProcessingSpecTest{}).String(): byts, err := json.Marshal(test) @@ -51,6 +45,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunMsgProcessing(t, typedTest) }) case reflect.TypeOf(&spectests.MsgSpecTest{}).String(): @@ -60,6 +55,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunMsg(t, typedTest) }) case reflect.TypeOf(&spectests.ControllerSpecTest{}).String(): @@ -69,6 +65,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunControllerSpecTest(t, typedTest) }) case reflect.TypeOf(&spectests.CreateMsgSpecTest{}).String(): @@ -78,6 +75,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunCreateMsg(t, typedTest) }) case reflect.TypeOf(&spectests.RoundRobinSpecTest{}).String(): 
@@ -87,21 +85,12 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { // using only spec struct so no need to run our version (TODO: check how we choose leader) + t.Parallel() typedTest.Run(t) }) /*t.Run(typedTest.TestName(), func(t *testing.T) { RunMsg(t, typedTest) })*/ - - case reflect.TypeOf(&futuremsg.ControllerSyncSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &futuremsg.ControllerSyncSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { - RunControllerSync(t, typedTest) - }) case reflect.TypeOf(&timeout.SpecTest{}).String(): byts, err := json.Marshal(test) require.NoError(t, err) diff --git a/protocol/v2/qbft/spectest/timeout_type.go b/protocol/v2/qbft/spectest/timeout_type.go index 637e1dd374..73b3fe7cde 100644 --- a/protocol/v2/qbft/spectest/timeout_type.go +++ b/protocol/v2/qbft/spectest/timeout_type.go @@ -7,8 +7,11 @@ import ( "github.com/bloxapp/ssv-spec/qbft" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + "github.com/stretchr/testify/require" ) @@ -32,7 +35,7 @@ func RunTimeout(t *testing.T, test *SpecTest) { } // test calling timeout - timer, ok := test.Pre.GetConfig().GetTimer().(*testingutils.TestQBFTTimer) + timer, ok := test.Pre.GetConfig().GetTimer().(*roundtimer.TestQBFTTimer) require.True(t, ok) require.Equal(t, test.ExpectedTimerState.Timeouts, timer.State.Timeouts) require.Equal(t, test.ExpectedTimerState.Round, timer.State.Round) diff --git a/protocol/v2/qbft/testing/utils.go b/protocol/v2/qbft/testing/utils.go index 35291f0acc..c6741925ce 100644 --- a/protocol/v2/qbft/testing/utils.go +++ b/protocol/v2/qbft/testing/utils.go @@ -6,10 +6,13 @@ import ( specqbft "github.com/bloxapp/ssv-spec/qbft" 
"github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" - "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/qbft" + "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" ) var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, role types.BeaconRole) *qbft.Config { @@ -31,9 +34,10 @@ var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, ro ProposerF: func(state *specqbft.State, round specqbft.Round) types.OperatorID { return 1 }, - Storage: TestingStores(logger).Get(role), - Network: testingutils.NewTestingNetwork(), - Timer: testingutils.NewTestingTimer(), + Storage: TestingStores(logger).Get(role), + Network: testingutils.NewTestingNetwork(), + Timer: roundtimer.NewTestingTimer(), + SignatureVerification: true, } } diff --git a/protocol/v2/queue/worker/message_worker.go b/protocol/v2/queue/worker/message_worker.go index ee96301870..5c9f2b3f97 100644 --- a/protocol/v2/queue/worker/message_worker.go +++ b/protocol/v2/queue/worker/message_worker.go @@ -2,11 +2,12 @@ package worker import ( "context" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) var ( @@ -24,12 +25,12 @@ func init() { } // MsgHandler func that receive message.SSVMessage to handle -type MsgHandler func(msg *spectypes.SSVMessage) error +type MsgHandler func(msg *queue.DecodedSSVMessage) error // ErrorHandler func that handles an error for a specific message -type ErrorHandler func(msg *spectypes.SSVMessage, err error) error +type ErrorHandler func(msg *queue.DecodedSSVMessage, err error) error -func defaultErrHandler(msg *spectypes.SSVMessage, err error) error { +func 
defaultErrHandler(msg *queue.DecodedSSVMessage, err error) error { return err } @@ -46,7 +47,7 @@ type Worker struct { ctx context.Context cancel context.CancelFunc workersCount int - queue chan *spectypes.SSVMessage + queue chan *queue.DecodedSSVMessage handler MsgHandler errHandler ErrorHandler metricsPrefix string @@ -60,7 +61,7 @@ func NewWorker(logger *zap.Logger, cfg *Config) *Worker { ctx: ctx, cancel: cancel, workersCount: cfg.WorkersCount, - queue: make(chan *spectypes.SSVMessage, cfg.Buffer), + queue: make(chan *queue.DecodedSSVMessage, cfg.Buffer), errHandler: defaultErrHandler, metricsPrefix: cfg.MetrixPrefix, } @@ -78,7 +79,7 @@ func (w *Worker) init(logger *zap.Logger) { } // startWorker process functionality -func (w *Worker) startWorker(logger *zap.Logger, ch <-chan *spectypes.SSVMessage) { +func (w *Worker) startWorker(logger *zap.Logger, ch <-chan *queue.DecodedSSVMessage) { ctx, cancel := context.WithCancel(w.ctx) defer cancel() for { @@ -104,7 +105,7 @@ func (w *Worker) UseErrorHandler(errHandler ErrorHandler) { // TryEnqueue tries to enqueue a job to the given job channel. Returns true if // the operation was successful, and false if enqueuing would not have been // possible without blocking. Job is not enqueued in the latter case. 
-func (w *Worker) TryEnqueue(msg *spectypes.SSVMessage) bool { +func (w *Worker) TryEnqueue(msg *queue.DecodedSSVMessage) bool { select { case w.queue <- msg: return true @@ -125,7 +126,7 @@ func (w *Worker) Size() int { } // process the msg's from queue -func (w *Worker) process(logger *zap.Logger, msg *spectypes.SSVMessage) { +func (w *Worker) process(logger *zap.Logger, msg *queue.DecodedSSVMessage) { if w.handler == nil { logger.Warn("❗ no handler for worker") return diff --git a/protocol/v2/queue/worker/message_worker_test.go b/protocol/v2/queue/worker/message_worker_test.go index b5cec21317..adbf5032d0 100644 --- a/protocol/v2/queue/worker/message_worker_test.go +++ b/protocol/v2/queue/worker/message_worker_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func TestWorker(t *testing.T) { @@ -20,12 +20,12 @@ func TestWorker(t *testing.T) { Buffer: 2, }) - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) return nil }) for i := 0; i < 5; i++ { - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) time.Sleep(time.Second * 1) } } @@ -41,7 +41,7 @@ func TestManyWorkers(t *testing.T) { }) time.Sleep(time.Millisecond * 100) // wait for worker to start listen - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) wg.Done() return nil @@ -49,7 +49,7 @@ func TestManyWorkers(t *testing.T) { for i := 0; i < 10; i++ { wg.Add(1) - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) } wg.Wait() } @@ -65,7 +65,7 @@ func TestBuffer(t *testing.T) { }) time.Sleep(time.Millisecond * 
100) // wait for worker to start listen - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) wg.Done() time.Sleep(time.Millisecond * 100) @@ -74,7 +74,7 @@ func TestBuffer(t *testing.T) { for i := 0; i < 11; i++ { // should buffer 10 msgs wg.Add(1) - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) } wg.Wait() } diff --git a/protocol/v2/ssv/queue/message_prioritizer_test.go b/protocol/v2/ssv/queue/message_prioritizer_test.go index f07e5e2691..deb3654b45 100644 --- a/protocol/v2/ssv/queue/message_prioritizer_test.go +++ b/protocol/v2/ssv/queue/message_prioritizer_test.go @@ -17,7 +17,6 @@ import ( "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) var messagePriorityTests = []struct { @@ -125,7 +124,7 @@ func TestMessagePrioritizer(t *testing.T) { messages := make(messageSlice, len(test.messages)) for i, m := range test.messages { var err error - messages[i], err = DecodeSSVMessage(zap.L(), m.ssvMessage(test.state)) + messages[i], err = DecodeSSVMessage(m.ssvMessage(test.state)) require.NoError(t, err) } diff --git a/protocol/v2/ssv/queue/messages.go b/protocol/v2/ssv/queue/messages.go index 01c6fb945c..f69644eee7 100644 --- a/protocol/v2/ssv/queue/messages.go +++ b/protocol/v2/ssv/queue/messages.go @@ -1,25 +1,31 @@ package queue import ( + "fmt" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/pkg/errors" - "go.uber.org/zap" ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" ) +var ( + ErrUnknownMessageType = fmt.Errorf("unknown message type") +) + // DecodedSSVMessage is a bundle of SSVMessage and it's decoding. 
+// TODO: try to make it generic type DecodedSSVMessage struct { *spectypes.SSVMessage // Body is the decoded Data. - Body interface{} // *SignedMessage | *SignedPartialSignatureMessage + Body interface{} // *SignedMessage | *SignedPartialSignatureMessage | *EventMsg } // DecodeSSVMessage decodes an SSVMessage and returns a DecodedSSVMessage. -func DecodeSSVMessage(logger *zap.Logger, m *spectypes.SSVMessage) (*DecodedSSVMessage, error) { +func DecodeSSVMessage(m *spectypes.SSVMessage) (*DecodedSSVMessage, error) { var body interface{} switch m.MsgType { case spectypes.SSVConsensusMsgType: // TODO: Or message.SSVDecidedMsgType? @@ -40,6 +46,8 @@ func DecodeSSVMessage(logger *zap.Logger, m *spectypes.SSVMessage) (*DecodedSSVM return nil, errors.Wrap(err, "failed to decode EventMsg") } body = msg + default: + return nil, ErrUnknownMessageType } return &DecodedSSVMessage{ SSVMessage: m, diff --git a/protocol/v2/ssv/queue/metrics.go b/protocol/v2/ssv/queue/metrics.go index 99d3c30ad3..36206704cc 100644 --- a/protocol/v2/ssv/queue/metrics.go +++ b/protocol/v2/ssv/queue/metrics.go @@ -1,14 +1,12 @@ package queue import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" + spectypes "github.com/bloxapp/ssv-spec/types" ) // Metrics records metrics about the Queue. type Metrics interface { - // Dropped increments the number of messages dropped from the Queue. - Dropped() + DroppedQueueMessage(messageID spectypes.MessageID) } type queueWithMetrics struct { @@ -27,35 +25,8 @@ func WithMetrics(q Queue, metrics Metrics) Queue { func (q *queueWithMetrics) TryPush(msg *DecodedSSVMessage) bool { pushed := q.Queue.TryPush(msg) if !pushed { - q.metrics.Dropped() + q.metrics.DroppedQueueMessage(msg.GetID()) } - return pushed -} - -// TODO: move to metrics/prometheus package -type prometheusMetrics struct { - dropped prometheus.Counter -} - -// NewPrometheusMetrics returns a Prometheus implementation of Metrics. 
-func NewPrometheusMetrics(messageID string) Metrics { - return &prometheusMetrics{ - dropped: metricMessageDropped.WithLabelValues(messageID), - } -} - -func (m *prometheusMetrics) Dropped() { - m.dropped.Inc() -} -// Register Prometheus metrics. -var ( - metricMessageDropped = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "ssv:ibft:msgq:drops", - Help: "The amount of message dropped from the validator's msg queue", - }, []string{"msg_id"}) -) - -func init() { - _ = prometheus.Register(metricMessageDropped) + return pushed } diff --git a/protocol/v2/ssv/queue/queue_test.go b/protocol/v2/ssv/queue/queue_test.go index a835779566..4b46c0e045 100644 --- a/protocol/v2/ssv/queue/queue_test.go +++ b/protocol/v2/ssv/queue/queue_test.go @@ -10,8 +10,8 @@ import ( "time" "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" - "go.uber.org/zap" "golang.org/x/text/language" "golang.org/x/text/message" ) @@ -109,7 +109,7 @@ func TestPriorityQueue_Pop(t *testing.T) { queue := New(capacity) require.True(t, queue.Empty()) - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(t, err) // Push messages. @@ -163,7 +163,7 @@ func TestPriorityQueue_Order(t *testing.T) { // Decode messages. 
messages := make(messageSlice, len(test.messages)) for i, m := range test.messages { - mm, err := DecodeSSVMessage(zap.L(), m.ssvMessage(test.state)) + mm, err := DecodeSSVMessage(m.ssvMessage(test.state)) require.NoError(t, err) messages[i] = mm } @@ -184,30 +184,32 @@ func TestPriorityQueue_Order(t *testing.T) { } } -type mockMetrics struct { - dropped int +type testMetrics struct { + dropped atomic.Uint64 } -func (m *mockMetrics) Dropped() { m.dropped++ } +func (n *testMetrics) DroppedQueueMessage(messageID spectypes.MessageID) { + n.dropped.Add(1) +} func TestWithMetrics(t *testing.T) { - var metrics mockMetrics - queue := WithMetrics(New(1), &metrics) + metrics := &testMetrics{} + queue := WithMetrics(New(1), metrics) require.True(t, queue.Empty()) // Push 1 message. - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(t, err) pushed := queue.TryPush(msg) require.True(t, pushed) require.False(t, queue.Empty()) - require.Equal(t, 0, metrics.dropped) + require.EqualValues(t, 0, metrics.dropped.Load()) // Push above capacity. 
pushed = queue.TryPush(msg) require.False(t, pushed) require.False(t, queue.Empty()) - require.Equal(t, 1, metrics.dropped) + require.EqualValues(t, 1, metrics.dropped.Load()) } func BenchmarkPriorityQueue_Parallel(b *testing.B) { @@ -234,7 +236,7 @@ func benchmarkPriorityQueueParallel(b *testing.B, factory func() Queue, lossy bo messages := make([]*DecodedSSVMessage, messageCount) for i := range messages { var err error - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: qbft.Height(rand.Intn(messageCount)), Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: qbft.Height(rand.Intn(messageCount)), Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(b, err) messages[i] = msg } @@ -359,7 +361,7 @@ func BenchmarkPriorityQueue_Concurrent(b *testing.B) { for _, i := range rand.Perm(messageCount) { height := qbft.FirstHeight + qbft.Height(i) for _, t := range types { - decoded, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: height, Type: t}.ssvMessage(mockState)) + decoded, err := DecodeSSVMessage(mockConsensusMessage{Height: height, Type: t}.ssvMessage(mockState)) require.NoError(b, err) msgs <- decoded } @@ -412,7 +414,7 @@ func BenchmarkPriorityQueue_Concurrent(b *testing.B) { } func decodeAndPush(t require.TestingT, queue Queue, msg mockMessage, state *State) *DecodedSSVMessage { - decoded, err := DecodeSSVMessage(zap.L(), msg.ssvMessage(state)) + decoded, err := DecodeSSVMessage(msg.ssvMessage(state)) require.NoError(t, err) queue.Push(decoded) return decoded diff --git a/protocol/v2/ssv/runner/metrics/metrics.go b/protocol/v2/ssv/runner/metrics/metrics.go index 5251ed909e..56ae65ed0f 100644 --- a/protocol/v2/ssv/runner/metrics/metrics.go +++ b/protocol/v2/ssv/runner/metrics/metrics.go @@ -45,9 +45,6 @@ var ( 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, - 5.1, 
5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8, 5.9, 6.0, - 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7.0, - 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7, 7.8, 7.9, 8.0, }, }, []string{"role"}) metricsRolesSubmitted = promauto.NewCounterVec(prometheus.CounterOpts{ diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index b59d404907..9034d5da84 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -96,6 +96,18 @@ func NewBaseRunner( // baseStartNewDuty is a base func that all runner implementation can call to start a duty func (b *BaseRunner) baseStartNewDuty(logger *zap.Logger, runner Runner, duty *spectypes.Duty) error { + if err := b.ShouldProcessDuty(duty); err != nil { + return errors.Wrap(err, "can't start duty") + } + b.baseSetupForNewDuty(duty) + return runner.executeDuty(logger, duty) +} + +// baseStartNewBeaconDuty is a base func that all runner implementation can call to start a non-beacon duty +func (b *BaseRunner) baseStartNewNonBeaconDuty(logger *zap.Logger, runner Runner, duty *spectypes.Duty) error { + if err := b.ShouldProcessNonBeaconDuty(duty); err != nil { + return errors.Wrap(err, "can't start non-beacon duty") + } b.baseSetupForNewDuty(duty) return runner.executeDuty(logger, duty) } @@ -265,3 +277,20 @@ func (b *BaseRunner) hasRunningDuty() bool { } return !b.State.Finished } + +func (b *BaseRunner) ShouldProcessDuty(duty *spectypes.Duty) error { + if b.QBFTController.Height >= specqbft.Height(duty.Slot) && b.QBFTController.Height != 0 { + return errors.Errorf("duty for slot %d already passed. Current height is %d", duty.Slot, + b.QBFTController.Height) + } + return nil +} + +func (b *BaseRunner) ShouldProcessNonBeaconDuty(duty *spectypes.Duty) error { + // assume StartingDuty is not nil if state is not nil + if b.State != nil && b.State.StartingDuty.Slot >= duty.Slot { + return errors.Errorf("duty for slot %d already passed. 
Current slot is %d", duty.Slot, + b.State.StartingDuty.Slot) + } + return nil +} diff --git a/protocol/v2/ssv/runner/runner_signatures.go b/protocol/v2/ssv/runner/runner_signatures.go index 96b2a723f5..54e4d9de1e 100644 --- a/protocol/v2/ssv/runner/runner_signatures.go +++ b/protocol/v2/ssv/runner/runner_signatures.go @@ -3,10 +3,11 @@ package runner import ( spec "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/protocol/v2/types" ssz "github.com/ferranbt/fastssz" "github.com/herumi/bls-eth-go-binary/bls" "github.com/pkg/errors" + + "github.com/bloxapp/ssv/protocol/v2/types" ) func (b *BaseRunner) signBeaconObject( diff --git a/protocol/v2/ssv/runner/timer.go b/protocol/v2/ssv/runner/timer.go index 9d8e4a315f..51e25ccbf6 100644 --- a/protocol/v2/ssv/runner/timer.go +++ b/protocol/v2/ssv/runner/timer.go @@ -9,7 +9,7 @@ import ( "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" ) -type TimeoutF func(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) func() +type TimeoutF func(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) roundtimer.OnRoundTimeoutF func (b *BaseRunner) registerTimeoutHandler(logger *zap.Logger, instance *instance.Instance, height specqbft.Height) { identifier := spectypes.MessageIDFromBytes(instance.State.ID) diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index 10bf6a39fe..f12dae2f08 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -2,6 +2,7 @@ package runner import ( "crypto/sha256" + "encoding/hex" "encoding/json" v1 "github.com/attestantio/go-eth2-client/api/v1" @@ -53,7 +54,7 @@ func NewValidatorRegistrationRunner( } func (r *ValidatorRegistrationRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - return r.BaseRunner.baseStartNewDuty(logger, r, duty) + return 
r.BaseRunner.baseStartNewNonBeaconDuty(logger, r, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) @@ -85,7 +86,9 @@ func (r *ValidatorRegistrationRunner) ProcessPreConsensus(logger *zap.Logger, si return errors.Wrap(err, "could not submit validator registration") } - logger.Debug("validator registration submitted successfully", fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:])) + logger.Debug("validator registration submitted successfully", + fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:]), + zap.String("signature", hex.EncodeToString(specSig[:]))) r.GetState().Finished = true return nil diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go new file mode 100644 index 0000000000..147e5f1471 --- /dev/null +++ b/protocol/v2/ssv/runner/voluntary_exit.go @@ -0,0 +1,226 @@ +package runner + +import ( + "crypto/sha256" + "encoding/json" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + specssv "github.com/bloxapp/ssv-spec/ssv" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/protocol/v2/ssv/runner/metrics" + ssz "github.com/ferranbt/fastssz" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +// Duty runner for validator voluntary exit duty +type VoluntaryExitRunner struct { + BaseRunner *BaseRunner + + beacon specssv.BeaconNode + network specssv.Network + signer spectypes.KeyManager + valCheck specqbft.ProposedValueCheckF + + voluntaryExit *phase0.VoluntaryExit + + metrics metrics.ConsensusMetrics +} + +func NewVoluntaryExitRunner( + beaconNetwork spectypes.BeaconNetwork, + share *spectypes.Share, + beacon specssv.BeaconNode, + network specssv.Network, + signer spectypes.KeyManager, +) Runner { + return &VoluntaryExitRunner{ + BaseRunner: &BaseRunner{ + BeaconRoleType: spectypes.BNRoleVoluntaryExit, + BeaconNetwork: beaconNetwork, + Share: share, + }, + + beacon: 
beacon, + network: network, + signer: signer, + metrics: metrics.NewConsensusMetrics(spectypes.BNRoleValidatorRegistration), + } +} + +func (r *VoluntaryExitRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { + return r.BaseRunner.baseStartNewNonBeaconDuty(logger, r, duty) +} + +// HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) +func (r *VoluntaryExitRunner) HasRunningDuty() bool { + return r.BaseRunner.hasRunningDuty() +} + +// Check for quorum of partial signatures over VoluntaryExit and, +// if has quorum, constructs SignedVoluntaryExit and submits to BeaconNode +func (r *VoluntaryExitRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { + quorum, roots, err := r.BaseRunner.basePreConsensusMsgProcessing(r, signedMsg) + if err != nil { + return errors.Wrap(err, "failed processing voluntary exit message") + } + + // quorum returns true only once (first time quorum achieved) + if !quorum { + return nil + } + + // only 1 root, verified in basePreConsensusMsgProcessing + root := roots[0] + fullSig, err := r.GetState().ReconstructBeaconSig(r.GetState().PreConsensusContainer, root, r.GetShare().ValidatorPubKey) + if err != nil { + return errors.Wrap(err, "could not reconstruct voluntary exit sig") + } + specSig := phase0.BLSSignature{} + copy(specSig[:], fullSig) + + // create SignedVoluntaryExit using VoluntaryExit created on r.executeDuty() and reconstructed signature + signedVoluntaryExit := &phase0.SignedVoluntaryExit{ + Message: r.voluntaryExit, + Signature: specSig, + } + + if err := r.beacon.SubmitVoluntaryExit(signedVoluntaryExit, specSig); err != nil { + return errors.Wrap(err, "could not submit voluntary exit") + } + + r.GetState().Finished = true + return nil +} + +func (r *VoluntaryExitRunner) ProcessConsensus(logger *zap.Logger, signedMsg *specqbft.SignedMessage) error { + return errors.New("no consensus phase for voluntary exit") +} + 
+func (r *VoluntaryExitRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { + return errors.New("no post consensus phase for voluntary exit") +} + +func (r *VoluntaryExitRunner) expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { + vr, err := r.calculateVoluntaryExit() + if err != nil { + return nil, spectypes.DomainError, errors.Wrap(err, "could not calculate voluntary exit") + } + return []ssz.HashRoot{vr}, spectypes.DomainVoluntaryExit, nil +} + +// expectedPostConsensusRootsAndDomain an INTERNAL function, returns the expected post-consensus roots to sign +func (r *VoluntaryExitRunner) expectedPostConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { + return nil, [4]byte{}, errors.New("no post consensus roots for voluntary exit") +} + +// Validator voluntary exit duty doesn't need consensus nor post-consensus. +// It just performs pre-consensus with VoluntaryExitPartialSig over +// a VoluntaryExit object to create a SignedVoluntaryExit +func (r *VoluntaryExitRunner) executeDuty(logger *zap.Logger, duty *spectypes.Duty) error { + voluntaryExit, err := r.calculateVoluntaryExit() + if err != nil { + return errors.Wrap(err, "could not calculate voluntary exit") + } + + // get PartialSignatureMessage with voluntaryExit root and signature + msg, err := r.BaseRunner.signBeaconObject(r, voluntaryExit, duty.Slot, spectypes.DomainVoluntaryExit) + if err != nil { + return errors.Wrap(err, "could not sign VoluntaryExit object") + } + + msgs := spectypes.PartialSignatureMessages{ + Type: spectypes.VoluntaryExitPartialSig, + Slot: duty.Slot, + Messages: []*spectypes.PartialSignatureMessage{msg}, + } + + // sign PartialSignatureMessages object + signature, err := r.GetSigner().SignRoot(msgs, spectypes.PartialSignatureType, r.GetShare().SharePubKey) + if err != nil { + return errors.Wrap(err, "could not sign randao msg") + } + signedPartialMsg := 
&spectypes.SignedPartialSignatureMessage{ + Message: msgs, + Signature: signature, + Signer: r.GetShare().OperatorID, + } + + // broadcast + data, err := signedPartialMsg.Encode() + if err != nil { + return errors.Wrap(err, "failed to encode signedPartialMsg with VoluntaryExit") + } + msgToBroadcast := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(r.GetShare().DomainType, r.GetShare().ValidatorPubKey, r.BaseRunner.BeaconRoleType), + Data: data, + } + if err := r.GetNetwork().Broadcast(msgToBroadcast); err != nil { + return errors.Wrap(err, "can't broadcast signedPartialMsg with VoluntaryExit") + } + + // stores value for later using in ProcessPreConsensus + r.voluntaryExit = voluntaryExit + + return nil +} + +// Returns *phase0.VoluntaryExit object with current epoch and own validator index +func (r *VoluntaryExitRunner) calculateVoluntaryExit() (*phase0.VoluntaryExit, error) { + epoch := r.BaseRunner.BeaconNetwork.EstimatedEpochAtSlot(r.BaseRunner.State.StartingDuty.Slot) + validatorIndex := r.GetState().StartingDuty.ValidatorIndex + return &phase0.VoluntaryExit{ + Epoch: epoch, + ValidatorIndex: validatorIndex, + }, nil +} + +func (r *VoluntaryExitRunner) GetBaseRunner() *BaseRunner { + return r.BaseRunner +} + +func (r *VoluntaryExitRunner) GetNetwork() specssv.Network { + return r.network +} + +func (r *VoluntaryExitRunner) GetBeaconNode() specssv.BeaconNode { + return r.beacon +} + +func (r *VoluntaryExitRunner) GetShare() *spectypes.Share { + return r.BaseRunner.Share +} + +func (r *VoluntaryExitRunner) GetState() *State { + return r.BaseRunner.State +} + +func (r *VoluntaryExitRunner) GetValCheckF() specqbft.ProposedValueCheckF { + return r.valCheck +} + +func (r *VoluntaryExitRunner) GetSigner() spectypes.KeyManager { + return r.signer +} + +// Encode returns the encoded struct in bytes or error +func (r *VoluntaryExitRunner) Encode() ([]byte, error) { + return json.Marshal(r) +} + +// Decode returns 
error if decoding failed +func (r *VoluntaryExitRunner) Decode(data []byte) error { + return json.Unmarshal(data, &r) +} + +// GetRoot returns the root used for signing and verification +func (r *VoluntaryExitRunner) GetRoot() ([32]byte, error) { + marshaledRoot, err := r.Encode() + if err != nil { + return [32]byte{}, errors.Wrap(err, "could not encode DutyRunnerState") + } + ret := sha256.Sum256(marshaledRoot) + return ret, nil +} diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go b/protocol/v2/ssv/spectest/msg_processing_type.go index 19fd0c71c8..b962418894 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -2,6 +2,9 @@ package spectest import ( "encoding/hex" + "path/filepath" + "reflect" + "strings" "testing" specqbft "github.com/bloxapp/ssv-spec/qbft" @@ -9,12 +12,15 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" + "go.uber.org/zap" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" ssvtesting "github.com/bloxapp/ssv/protocol/v2/ssv/testing" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) type MsgProcessingSpecTest struct { @@ -23,6 +29,7 @@ type MsgProcessingSpecTest struct { Duty *spectypes.Duty Messages []*spectypes.SSVMessage PostDutyRunnerStateRoot string + PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json // OutputMessages compares pre/ post signed partial sigs to output. 
We exclude consensus msgs as it's tested in consensus OutputMessages []*spectypes.SignedPartialSignatureMessage BeaconBroadcastedRoots []string @@ -36,6 +43,11 @@ func (test *MsgProcessingSpecTest) TestName() string { func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { logger := logging.TestLogger(t) + test.overrideStateComparison(t) + test.RunAsPartOfMultiTest(t, logger) +} + +func (test *MsgProcessingSpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { v := ssvtesting.BaseValidator(logger, spectestingutils.KeySetForShare(test.Runner.GetBaseRunner().Share)) v.DutyRunners[test.Runner.GetBaseRunner().BeaconRoleType] = test.Runner v.Network = test.Runner.GetNetwork().(specqbft.Network) // TODO need to align @@ -45,7 +57,7 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { lastErr = v.StartDuty(logger, test.Duty) } for _, msg := range test.Messages { - dmsg, err := queue.DecodeSSVMessage(logger, msg) + dmsg, err := queue.DecodeSSVMessage(msg) if err != nil { lastErr = err continue @@ -57,7 +69,7 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { } if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) + require.EqualError(t, lastErr, test.ExpectedError, "expected: %v", test.ExpectedError) } else { require.NoError(t, lastErr) } @@ -143,3 +155,43 @@ func (test *MsgProcessingSpecTest) compareOutputMsgs(t *testing.T, v *validator. 
index++ } } + +func (test *MsgProcessingSpecTest) overrideStateComparison(t *testing.T) { + testType := reflect.TypeOf(test).String() + testType = strings.Replace(testType, "spectest.", "tests.", 1) + overrideStateComparison(t, test, test.Name, testType) +} + +func overrideStateComparison(t *testing.T, test *MsgProcessingSpecTest, name string, testType string) { + var r runner.Runner + switch test.Runner.(type) { + case *runner.AttesterRunner: + r = &runner.AttesterRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case *runner.SyncCommitteeRunner: + r = &runner.SyncCommitteeRunner{} + case *runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + // override + test.PostDutyRunnerState = r + + root, err := r.GetRoot() + require.NoError(t, err) + + test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) +} diff --git a/protocol/v2/ssv/spectest/multi_msg_processing_type.go b/protocol/v2/ssv/spectest/multi_msg_processing_type.go index 0b4b926f6e..4d040782e2 100644 --- a/protocol/v2/ssv/spectest/multi_msg_processing_type.go +++ b/protocol/v2/ssv/spectest/multi_msg_processing_type.go @@ -1,10 +1,20 @@ package spectest -import "testing" +import ( + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/bloxapp/ssv/logging" + "go.uber.org/zap" +) type MultiMsgProcessingSpecTest struct { Name string Tests []*MsgProcessingSpecTest + + logger *zap.Logger } func (tests *MultiMsgProcessingSpecTest) TestName() string { @@ -12,10 
+22,23 @@ func (tests *MultiMsgProcessingSpecTest) TestName() string { } func (tests *MultiMsgProcessingSpecTest) Run(t *testing.T) { + tests.logger = logging.TestLogger(t) + tests.overrideStateComparison(t) + for _, test := range tests.Tests { - test := test t.Run(test.TestName(), func(t *testing.T) { - RunMsgProcessing(t, test) + test.RunAsPartOfMultiTest(t, tests.logger) }) } } + +// overrideStateComparison overrides the post state comparison for all tests in the multi test +func (tests *MultiMsgProcessingSpecTest) overrideStateComparison(t *testing.T) { + testsName := strings.ReplaceAll(tests.TestName(), " ", "_") + for _, test := range tests.Tests { + path := filepath.Join(testsName, test.TestName()) + testType := reflect.TypeOf(tests).String() + testType = strings.Replace(testType, "spectest.", "tests.", 1) + overrideStateComparison(t, test, path, testType) + } +} diff --git a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go index c8bf0cae80..cfac13ec9d 100644 --- a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go +++ b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go @@ -2,14 +2,19 @@ package spectest import ( "encoding/hex" + "path/filepath" + "reflect" + "strings" "testing" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) type StartNewRunnerDutySpecTest struct { @@ -17,6 +22,7 @@ type StartNewRunnerDutySpecTest struct { Runner runner.Runner Duty *spectypes.Duty PostDutyRunnerStateRoot string + PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json OutputMessages []*spectypes.SignedPartialSignatureMessage ExpectedError string } 
@@ -25,7 +31,14 @@ func (test *StartNewRunnerDutySpecTest) TestName() string { return test.Name } -func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { +// overrideStateComparison overrides the state comparison to compare the runner state +func (test *StartNewRunnerDutySpecTest) overrideStateComparison(t *testing.T) { + testType := reflect.TypeOf(test).String() + testType = strings.Replace(testType, "spectest.", "newduty.", 1) + overrideStateComparisonForStartNewRunnerDutySpecTest(t, test, test.Name, testType) +} + +func (test *StartNewRunnerDutySpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { err := test.Runner.StartNewDuty(logger, test.Duty) if len(test.ExpectedError) > 0 { require.EqualError(t, err, test.ExpectedError) @@ -84,6 +97,11 @@ func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { require.EqualValues(t, test.PostDutyRunnerStateRoot, hex.EncodeToString(postRoot[:])) } +func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { + test.overrideStateComparison(t) + test.RunAsPartOfMultiTest(t, logger) +} + type MultiStartNewRunnerDutySpecTest struct { Name string Tests []*StartNewRunnerDutySpecTest @@ -94,10 +112,56 @@ func (tests *MultiStartNewRunnerDutySpecTest) TestName() string { } func (tests *MultiStartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { + tests.overrideStateComparison(t) + for _, test := range tests.Tests { - test := test t.Run(test.TestName(), func(t *testing.T) { - test.Run(t, logger) + test.RunAsPartOfMultiTest(t, logger) }) } } + +// overrideStateComparison overrides the post state comparison for all tests in the multi test +func (tests *MultiStartNewRunnerDutySpecTest) overrideStateComparison(t *testing.T) { + testsName := strings.ReplaceAll(tests.TestName(), " ", "_") + for _, test := range tests.Tests { + path := filepath.Join(testsName, test.TestName()) + testType := reflect.TypeOf(tests).String() + testType = 
strings.Replace(testType, "spectest.", "newduty.", 1) + overrideStateComparisonForStartNewRunnerDutySpecTest(t, test, path, testType) + } +} + +func overrideStateComparisonForStartNewRunnerDutySpecTest(t *testing.T, test *StartNewRunnerDutySpecTest, name string, testType string) { + var r runner.Runner + switch test.Runner.(type) { + case *runner.AttesterRunner: + r = &runner.AttesterRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case *runner.SyncCommitteeRunner: + r = &runner.SyncCommitteeRunner{} + case *runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + // override + test.PostDutyRunnerState = r + + root, err := r.GetRoot() + require.NoError(t, err) + + test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) +} diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go index ccb15f0285..fae52f8e21 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -2,7 +2,6 @@ package spectest import ( "encoding/json" - "fmt" "os" "reflect" "strings" @@ -19,7 +18,6 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" @@ -41,105 +39,130 @@ func TestSSVMapping(t *testing.T) { panic(err.Error()) } - origDomain := types.GetDefaultDomain() 
types.SetDefaultDomain(testingutils.TestingSSVDomainType) - defer func() { - types.SetDefaultDomain(origDomain) - }() for name, test := range untypedTests { name, test := name, test + r := prepareTest(t, logger, name, test) + if r != nil { + t.Run(r.name, func(t *testing.T) { + t.Parallel() + r.test(t) + }) + } + } +} - testName := strings.Split(name, "_")[1] - testType := strings.Split(name, "_")[0] +type runnable struct { + name string + test func(t *testing.T) +} - fmt.Printf("--------- %s - %s \n", testType, testName) +func prepareTest(t *testing.T, logger *zap.Logger, name string, test interface{}) *runnable { + testName := strings.Split(name, "_")[1] + testType := strings.Split(name, "_")[0] - switch testType { - case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &MsgProcessingSpecTest{ - Runner: &runner.AttesterRunner{}, - } - // TODO fix blinded test - if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { - continue - } - require.NoError(t, json.Unmarshal(byts, &typedTest)) + switch testType { + case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &MsgProcessingSpecTest{ + Runner: &runner.AttesterRunner{}, + } + // TODO: fix blinded test + if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { + logger.Info("skipping blinded block test", zap.String("test", testName)) + return nil + } + require.NoError(t, json.Unmarshal(byts, &typedTest)) - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { RunMsgProcessing(t, typedTest) - }) - case reflect.TypeOf(&tests.MultiMsgProcessingSpecTest{}).String(): - subtests := test.(map[string]interface{})["Tests"].([]interface{}) - 
typedTests := make([]*MsgProcessingSpecTest, 0) - for _, subtest := range subtests { - typedTests = append(typedTests, msgProcessingSpecTestFromMap(t, subtest.(map[string]interface{}))) - } - - typedTest := &MultiMsgProcessingSpecTest{ - Name: test.(map[string]interface{})["Name"].(string), - Tests: typedTests, - } + }, + } + case reflect.TypeOf(&tests.MultiMsgProcessingSpecTest{}).String(): + typedTest := &MultiMsgProcessingSpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + } + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + for _, subtest := range subtests { + typedTest.Tests = append(typedTest.Tests, msgProcessingSpecTestFromMap(t, subtest.(map[string]interface{}))) + } - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&messages.MsgSpecTest{}).String(): // no use of internal structs so can run as spec test runs - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &messages.MsgSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&messages.MsgSpecTest{}).String(): // no use of internal structs so can run as spec test runs + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &messages.MsgSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&valcheck.SpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &valcheck.SpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&valcheck.SpecTest{}).String(): // no 
use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &valcheck.SpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&valcheck.MultiSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &valcheck.MultiSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&valcheck.MultiSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &valcheck.MultiSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: 
typedTest.TestName(), + test: func(t *testing.T) { RunSyncCommitteeAggProof(t, typedTest) - }) - case reflect.TypeOf(&newduty.MultiStartNewRunnerDutySpecTest{}).String(): - subtests := test.(map[string]interface{})["Tests"].([]interface{}) - typedTests := make([]*StartNewRunnerDutySpecTest, 0) - for _, subtest := range subtests { - typedTests = append(typedTests, newRunnerDutySpecTestFromMap(t, subtest.(map[string]interface{}))) - } - - typedTest := &MultiStartNewRunnerDutySpecTest{ - Name: test.(map[string]interface{})["Name"].(string), - Tests: typedTests, - } + }, + } + case reflect.TypeOf(&newduty.MultiStartNewRunnerDutySpecTest{}).String(): + typedTest := &MultiStartNewRunnerDutySpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + } - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + for _, subtest := range subtests { + typedTest.Tests = append(typedTest.Tests, newRunnerDutySpecTestFromMap(t, subtest.(map[string]interface{}))) + } typedTest.Run(t, logger) - }) - default: - t.Fatalf("unsupported test type %s [%s]", testType, testName) + }, } + default: + t.Fatalf("unsupported test type %s [%s]", testType, testName) + return nil } } @@ -152,11 +175,13 @@ func newRunnerDutySpecTestFromMap(t *testing.T, m map[string]interface{}) *Start require.NoError(t, json.Unmarshal(byts, duty)) outputMsgs := make([]*spectypes.SignedPartialSignatureMessage, 0) - for _, msg := range m["OutputMessages"].([]interface{}) { - byts, _ = json.Marshal(msg) - typedMsg := &spectypes.SignedPartialSignatureMessage{} - require.NoError(t, json.Unmarshal(byts, typedMsg)) - outputMsgs = append(outputMsgs, typedMsg) + if v, ok := m["OutputMessages"].([]interface{}); ok { + for _, msg := range v { + byts, _ = json.Marshal(msg) + typedMsg := &spectypes.SignedPartialSignatureMessage{} + require.NoError(t, json.Unmarshal(byts, typedMsg)) 
+ outputMsgs = append(outputMsgs, typedMsg) + } } ks := testingutils.KeySetForShare(&spectypes.Share{Quorum: uint64(baseRunnerMap["Share"].(map[string]interface{})["Quorum"].(float64))}) @@ -324,6 +349,10 @@ func baseRunnerForRole(logger *zap.Logger, role spectypes.BeaconRole, base *runn ret := ssvtesting.ValidatorRegistrationRunner(logger, ks) ret.(*runner.ValidatorRegistrationRunner).BaseRunner = base return ret + case spectypes.BNRoleVoluntaryExit: + ret := ssvtesting.VoluntaryExitRunner(logger, ks) + ret.(*runner.VoluntaryExitRunner).BaseRunner = base + return ret case testingutils.UnknownDutyType: ret := ssvtesting.UnknownDutyTypeRunner(logger, ks) ret.(*runner.AttesterRunner).BaseRunner = base diff --git a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go index 9e12cab157..2fd4091732 100644 --- a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go +++ b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go @@ -24,7 +24,7 @@ func RunSyncCommitteeAggProof(t *testing.T, test *synccommitteeaggregator.SyncCo lastErr := v.StartDuty(logger, &testingutils.TestingSyncCommitteeContributionDuty) for _, msg := range test.Messages { - dmsg, err := queue.DecodeSSVMessage(logger, msg) + dmsg, err := queue.DecodeSSVMessage(msg) if err != nil { lastErr = err continue diff --git a/protocol/v2/ssv/testing/runner.go b/protocol/v2/ssv/testing/runner.go index 2d8fcc8095..7689d10073 100644 --- a/protocol/v2/ssv/testing/runner.go +++ b/protocol/v2/ssv/testing/runner.go @@ -23,14 +23,14 @@ var AttesterRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySe //} var ProposerRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { - return baseRunner(logger, spectypes.BNRoleProposer, specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], 
spectestingutils.TestingValidatorIndex, nil, true), keySet) + return baseRunner(logger, spectypes.BNRoleProposer, specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil), keySet) } var ProposerBlindedBlockRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { ret := baseRunner( logger, spectypes.BNRoleProposer, - specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil, true), + specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil), keySet, ) ret.(*runner.ProposerRunner).ProducesBlindedBlocks = true @@ -54,6 +54,10 @@ var ValidatorRegistrationRunner = func(logger *zap.Logger, keySet *spectestingut return ret } +var VoluntaryExitRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { + return baseRunner(logger, spectypes.BNRoleVoluntaryExit, nil, keySet) +} + var UnknownDutyTypeRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { return baseRunner(logger, spectestingutils.UnknownDutyType, spectestingutils.UnknownDutyValueCheck(), keySet) } @@ -144,6 +148,14 @@ var baseRunner = func(logger *zap.Logger, role spectypes.BeaconRole, valCheck sp net, km, ) + case spectypes.BNRoleVoluntaryExit: + return runner.NewVoluntaryExitRunner( + spectypes.BeaconTestNetwork, + share, + spectestingutils.NewTestingBeaconNode(), + net, + km, + ) case spectestingutils.UnknownDutyType: ret := runner.NewAttesterRunnner( spectypes.BeaconTestNetwork, diff --git a/protocol/v2/ssv/testing/validator.go b/protocol/v2/ssv/testing/validator.go index 844145bd8c..d006111c2b 100644 --- 
a/protocol/v2/ssv/testing/validator.go +++ b/protocol/v2/ssv/testing/validator.go @@ -7,6 +7,7 @@ import ( spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "go.uber.org/zap" + "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/protocol/v2/qbft/testing" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" @@ -22,7 +23,7 @@ var BaseValidator = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet validator.Options{ Network: spectestingutils.NewTestingNetwork(), Beacon: spectestingutils.NewTestingBeaconNode(), - BeaconNetwork: spectypes.BeaconTestNetwork, + BeaconNetwork: networkconfig.TestNetwork.Beacon, Storage: testing.TestingStores(logger), SSVShare: &types.SSVShare{ Share: *spectestingutils.TestingShare(keySet), diff --git a/protocol/v2/ssv/validator/metrics.go b/protocol/v2/ssv/validator/metrics.go new file mode 100644 index 0000000000..ce1840736b --- /dev/null +++ b/protocol/v2/ssv/validator/metrics.go @@ -0,0 +1,45 @@ +package validator + +import ( + "time" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" +) + +type Metrics interface { + ValidatorInactive(publicKey []byte) + ValidatorNoIndex(publicKey []byte) + ValidatorError(publicKey []byte) + ValidatorReady(publicKey []byte) + ValidatorNotActivated(publicKey []byte) + ValidatorExiting(publicKey []byte) + ValidatorSlashed(publicKey []byte) + ValidatorNotFound(publicKey []byte) + ValidatorPending(publicKey []byte) + ValidatorRemoved(publicKey []byte) + ValidatorUnknown(publicKey []byte) + + queue.Metrics +} + +type NopMetrics struct{} + +func (n NopMetrics) ValidatorInactive([]byte) {} +func (n NopMetrics) ValidatorNoIndex([]byte) {} +func (n NopMetrics) ValidatorError([]byte) {} +func (n NopMetrics) ValidatorReady([]byte) {} +func (n NopMetrics) ValidatorNotActivated([]byte) {} +func (n NopMetrics) ValidatorExiting([]byte) {} +func (n NopMetrics) ValidatorSlashed([]byte) {} 
+func (n NopMetrics) ValidatorNotFound([]byte) {} +func (n NopMetrics) ValidatorPending([]byte) {} +func (n NopMetrics) ValidatorRemoved([]byte) {} +func (n NopMetrics) ValidatorUnknown([]byte) {} +func (n NopMetrics) IncomingQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) OutgoingQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) DroppedQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) MessageQueueSize(int) {} +func (n NopMetrics) MessageQueueCapacity(int) {} +func (n NopMetrics) MessageTimeInQueue(spectypes.MessageID, time.Duration) {} diff --git a/protocol/v2/ssv/validator/msgqueue_consumer.go b/protocol/v2/ssv/validator/msgqueue_consumer.go index 7ba5efb119..ba82efa396 100644 --- a/protocol/v2/ssv/validator/msgqueue_consumer.go +++ b/protocol/v2/ssv/validator/msgqueue_consumer.go @@ -28,7 +28,8 @@ type queueContainer struct { // HandleMessage handles a spectypes.SSVMessage. // TODO: accept DecodedSSVMessage once p2p is upgraded to decode messages during validation. 
-func (v *Validator) HandleMessage(logger *zap.Logger, msg *spectypes.SSVMessage) { +// TODO: get rid of logger, add context +func (v *Validator) HandleMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) { v.mtx.RLock() // read v.Queues defer v.mtx.RUnlock() @@ -37,22 +38,13 @@ func (v *Validator) HandleMessage(logger *zap.Logger, msg *spectypes.SSVMessage) // fields.Role(msg.MsgID.GetRoleType())) if q, ok := v.Queues[msg.MsgID.GetRoleType()]; ok { - decodedMsg, err := queue.DecodeSSVMessage(logger, msg) - if err != nil { - logger.Warn("❗ failed to decode message", - zap.Error(err), - zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), - zap.String("msg_id", msg.MsgID.String()), - ) - return - } - if pushed := q.Q.TryPush(decodedMsg); !pushed { + if pushed := q.Q.TryPush(msg); !pushed { msgID := msg.MsgID.String() logger.Warn("❗ dropping message because the queue is full", zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), zap.String("msg_id", msgID)) } - // logger.Debug("📬 queue: pushed message", fields.MessageID(decodedMsg.MsgID), fields.MessageType(decodedMsg.MsgType)) + // logger.Debug("📬 queue: pushed message", fields.MessageID(msg.MsgID), fields.MessageType(msg.MsgType)) } else { logger.Error("❌ missing queue for role type", fields.Role(msg.MsgID.GetRoleType())) } diff --git a/protocol/v2/ssv/validator/non_committee_validator.go b/protocol/v2/ssv/validator/non_committee_validator.go index 3d03a44d4e..c9d2dbb431 100644 --- a/protocol/v2/ssv/validator/non_committee_validator.go +++ b/protocol/v2/ssv/validator/non_committee_validator.go @@ -9,6 +9,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/qbft" qbftcontroller "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -21,9 +22,10 @@ type NonCommitteeValidator struct { func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID, 
opts Options) *NonCommitteeValidator { // currently, only need domain & storage config := &qbft.Config{ - Domain: types.GetDefaultDomain(), - Storage: opts.Storage.Get(identifier.GetRoleType()), - Network: opts.Network, + Domain: types.GetDefaultDomain(), + Storage: opts.Storage.Get(identifier.GetRoleType()), + Network: opts.Network, + SignatureVerification: true, } ctrl := qbftcontroller.NewController(identifier[:], &opts.SSVShare.Share, types.GetDefaultDomain(), config, opts.FullNode) ctrl.StoredInstances = make(qbftcontroller.InstanceContainer, 0, nonCommitteeInstanceContainerCapacity(opts.FullNode)) @@ -39,7 +41,7 @@ func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID } } -func (ncv *NonCommitteeValidator) ProcessMessage(logger *zap.Logger, msg *spectypes.SSVMessage) { +func (ncv *NonCommitteeValidator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) { logger = logger.With(fields.PubKey(msg.MsgID.GetPubKey()), fields.Role(msg.MsgID.GetRoleType())) if err := validateMessage(ncv.Share.Share, msg); err != nil { diff --git a/protocol/v2/ssv/validator/opts.go b/protocol/v2/ssv/validator/opts.go index e1085dead6..8b32cfe0fa 100644 --- a/protocol/v2/ssv/validator/opts.go +++ b/protocol/v2/ssv/validator/opts.go @@ -6,6 +6,8 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv/ibft/storage" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" qbftctrl "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/types" @@ -19,7 +21,7 @@ const ( type Options struct { Network specqbft.Network Beacon specssv.BeaconNode - BeaconNetwork spectypes.BeaconNetwork + BeaconNetwork beacon.BeaconNetwork Storage *storage.QBFTStores SSVShare *types.SSVShare Signer spectypes.KeyManager @@ -30,6 +32,8 @@ type Options struct { BuilderProposals bool QueueSize int GasLimit uint64 + 
MessageValidator validation.MessageValidator + Metrics Metrics } func (o *Options) defaults() { diff --git a/protocol/v2/ssv/validator/startup.go b/protocol/v2/ssv/validator/startup.go index 4ca2c8acea..b316e8c9f2 100644 --- a/protocol/v2/ssv/validator/startup.go +++ b/protocol/v2/ssv/validator/startup.go @@ -1,9 +1,7 @@ package validator import ( - "context" "sync/atomic" - "time" "github.com/bloxapp/ssv-spec/p2p" spectypes "github.com/bloxapp/ssv-spec/types" @@ -56,7 +54,6 @@ func (v *Validator) Start(logger *zap.Logger) (started bool, err error) { return true, err } go v.StartQueueConsumer(logger, identifier, v.ProcessMessage) - go v.sync(logger, identifier) } return true, nil } @@ -73,27 +70,3 @@ func (v *Validator) Stop() { v.Queues = make(map[spectypes.BeaconRole]queueContainer) } } - -// sync performs highest decided sync -func (v *Validator) sync(logger *zap.Logger, mid spectypes.MessageID) { - ctx, cancel := context.WithCancel(v.ctx) - defer cancel() - - // TODO: config? - interval := time.Second - retries := 3 - - for ctx.Err() == nil { - err := v.Network.SyncHighestDecided(mid) - if err != nil { - logger.Debug("❌ failed to sync highest decided", zap.Error(err)) - retries-- - if retries > 0 { - interval *= 2 - time.Sleep(interval) - continue - } - } - return - } -} diff --git a/protocol/v2/ssv/validator/timer.go b/protocol/v2/ssv/validator/timer.go index 87013bd5dd..6b819b992b 100644 --- a/protocol/v2/ssv/validator/timer.go +++ b/protocol/v2/ssv/validator/timer.go @@ -10,12 +10,13 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/types" ) -func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) func() { - return func() { +func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID, height 
specqbft.Height) roundtimer.OnRoundTimeoutF { + return func(round specqbft.Round) { v.mtx.RLock() // read-lock for v.Queues, v.state defer v.mtx.RUnlock() @@ -30,12 +31,12 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID return } - msg, err := v.createTimerMessage(identifier, height) + msg, err := v.createTimerMessage(identifier, height, round) if err != nil { logger.Debug("❗ failed to create timer msg", zap.Error(err)) return } - dec, err := queue.DecodeSSVMessage(logger, msg) + dec, err := queue.DecodeSSVMessage(msg) if err != nil { logger.Debug("❌ failed to decode timer msg", zap.Error(err)) return @@ -49,8 +50,11 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID } } -func (v *Validator) createTimerMessage(identifier spectypes.MessageID, height specqbft.Height) (*spectypes.SSVMessage, error) { - td := types.TimeoutData{Height: height} +func (v *Validator) createTimerMessage(identifier spectypes.MessageID, height specqbft.Height, round specqbft.Round) (*spectypes.SSVMessage, error) { + td := types.TimeoutData{ + Height: height, + Round: round, + } data, err := json.Marshal(td) if err != nil { return nil, errors.Wrap(err, "failed to marshal timeout data") diff --git a/protocol/v2/ssv/validator/validator.go b/protocol/v2/ssv/validator/validator.go index 7f1dd80d2e..0fa54de66a 100644 --- a/protocol/v2/ssv/validator/validator.go +++ b/protocol/v2/ssv/validator/validator.go @@ -13,6 +13,7 @@ import ( "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" @@ -39,24 +40,31 @@ type Validator struct { dutyIDs *hashmap.Map[spectypes.BeaconRole, string] state uint32 + + messageValidator validation.MessageValidator } // NewValidator creates a new instance of Validator. 
func NewValidator(pctx context.Context, cancel func(), options Options) *Validator { options.defaults() + if options.Metrics == nil { + options.Metrics = &NopMetrics{} + } + v := &Validator{ - mtx: &sync.RWMutex{}, - ctx: pctx, - cancel: cancel, - DutyRunners: options.DutyRunners, - Network: options.Network, - Storage: options.Storage, - Share: options.SSVShare, - Signer: options.Signer, - Queues: make(map[spectypes.BeaconRole]queueContainer), - state: uint32(NotStarted), - dutyIDs: hashmap.New[spectypes.BeaconRole, string](), + mtx: &sync.RWMutex{}, + ctx: pctx, + cancel: cancel, + DutyRunners: options.DutyRunners, + Network: options.Network, + Storage: options.Storage, + Share: options.SSVShare, + Signer: options.Signer, + Queues: make(map[spectypes.BeaconRole]queueContainer), + state: uint32(NotStarted), + dutyIDs: hashmap.New[spectypes.BeaconRole, string](), + messageValidator: options.MessageValidator, } for _, dutyRunner := range options.DutyRunners { @@ -65,10 +73,9 @@ func NewValidator(pctx context.Context, cancel func(), options Options) *Validat // Setup the queue. 
role := dutyRunner.GetBaseRunner().BeaconRoleType - msgID := spectypes.NewMsgID(types.GetDefaultDomain(), options.SSVShare.ValidatorPubKey, role).String() v.Queues[role] = queueContainer{ - Q: queue.WithMetrics(queue.New(options.QueueSize), queue.NewPrometheusMetrics(msgID)), + Q: queue.WithMetrics(queue.New(options.QueueSize), options.Metrics), queueState: &queue.State{ HasRunningInstance: false, Height: 0, @@ -111,7 +118,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess return fmt.Errorf("could not get duty runner for msg ID %v", messageID) } - if err := validateMessage(v.Share.Share, msg.SSVMessage); err != nil { + if err := validateMessage(v.Share.Share, msg); err != nil { return fmt.Errorf("message invalid for msg ID %v: %w", messageID, err) } @@ -143,7 +150,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess } } -func validateMessage(share spectypes.Share, msg *spectypes.SSVMessage) error { +func validateMessage(share spectypes.Share, msg *queue.DecodedSSVMessage) error { if !share.ValidatorPubKey.MessageIDBelongs(msg.GetID()) { return errors.New("msg ID doesn't match validator ID") } diff --git a/protocol/v2/sync/handlers/decided_history.go b/protocol/v2/sync/handlers/decided_history.go deleted file mode 100644 index 3dc960cfcb..0000000000 --- a/protocol/v2/sync/handlers/decided_history.go +++ /dev/null @@ -1,57 +0,0 @@ -package handlers - -import ( - "fmt" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/ibft/storage" - "github.com/bloxapp/ssv/protocol/v2/message" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" -) - -// HistoryHandler handler for decided history protocol -// TODO: add msg validation and report scores -func HistoryHandler(logger *zap.Logger, storeMap *storage.QBFTStores, reporting protocolp2p.ValidationReporting, maxBatchSize int) 
protocolp2p.RequestHandler { - return func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { - logger := logger.With(zap.String("msg_id", fmt.Sprintf("%x", msg.MsgID))) - sm := &message.SyncMessage{} - err := sm.Decode(msg.Data) - if err != nil { - logger.Debug("❌ failed to decode message data", zap.Error(err)) - reporting.ReportValidation(logger, msg, protocolp2p.ValidationRejectLow) - sm.Status = message.StatusBadRequest - } else if sm.Protocol != message.DecidedHistoryType { - // not this protocol - // TODO: remove after v0 - return nil, nil - } else { - items := int(sm.Params.Height[1] - sm.Params.Height[0]) - if items > maxBatchSize { - sm.Params.Height[1] = sm.Params.Height[0] + specqbft.Height(maxBatchSize) - } - msgID := msg.GetID() - store := storeMap.Get(msgID.GetRoleType()) - if store == nil { - return nil, errors.New(fmt.Sprintf("not storage found for type %s", msgID.GetRoleType().String())) - } - instances, err := store.GetInstancesInRange(msgID[:], sm.Params.Height[0], sm.Params.Height[1]) - results := make([]*specqbft.SignedMessage, 0, len(instances)) - for _, instance := range instances { - results = append(results, instance.DecidedMessage) - } - sm.UpdateResults(err, results...) 
- } - - data, err := sm.Encode() - if err != nil { - return nil, errors.Wrap(err, "could not encode result data") - } - msg.Data = data - - return msg, nil - } -} diff --git a/protocol/v2/sync/handlers/last_decided.go b/protocol/v2/sync/handlers/last_decided.go deleted file mode 100644 index 6b33579b0f..0000000000 --- a/protocol/v2/sync/handlers/last_decided.go +++ /dev/null @@ -1,53 +0,0 @@ -package handlers - -import ( - "fmt" - - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/ibft/storage" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/protocol/v2/message" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" -) - -// LastDecidedHandler handler for last-decided protocol -// TODO: add msg validation and report scores -func LastDecidedHandler(plogger *zap.Logger, storeMap *storage.QBFTStores, reporting protocolp2p.ValidationReporting) protocolp2p.RequestHandler { - return func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { - logger := plogger.With(fields.PubKey(msg.MsgID.GetPubKey())) - sm := &message.SyncMessage{} - err := sm.Decode(msg.Data) - if err != nil { - logger.Debug("❌ failed to decode message data", zap.Error(err)) - reporting.ReportValidation(logger, msg, protocolp2p.ValidationRejectLow) - sm.Status = message.StatusBadRequest - } else if sm.Protocol != message.LastDecidedType { - // not this protocol - // TODO: remove after v0 - return nil, nil - } else { - msgID := msg.GetID() - store := storeMap.Get(msgID.GetRoleType()) - if store == nil { - return nil, errors.New(fmt.Sprintf("not storage found for type %s", msgID.GetRoleType().String())) - } - instance, err := store.GetHighestInstance(msgID[:]) - if err != nil { - logger.Debug("❗ failed to get highest instance", zap.Error(err)) - } else if instance != nil { - sm.UpdateResults(err, instance.DecidedMessage) - } - } - - data, err := sm.Encode() - if err != nil { - return nil, errors.Wrap(err, 
"could not encode result data") - } - msg.Data = data - - return msg, nil - } -} diff --git a/protocol/v2/testing/test_utils.go b/protocol/v2/testing/test_utils.go index 2b2f79e4c1..7994e60361 100644 --- a/protocol/v2/testing/test_utils.go +++ b/protocol/v2/testing/test_utils.go @@ -1,6 +1,7 @@ package testing import ( + "fmt" "os" "path" "path/filepath" @@ -145,9 +146,25 @@ func AggregateInvalidSign(t *testing.T, sks map[spectypes.OperatorID]*bls.Secret } func GetSpecTestJSON(path string, module string) ([]byte, error) { + p, err := GetSpecDir(path, module) + if err != nil { + return nil, fmt.Errorf("could not get spec test dir: %w", err) + } + return os.ReadFile(filepath.Join(filepath.Clean(p), filepath.Clean(specTestPath))) +} + +// GetSpecDir returns the path to the ssv-spec module. +func GetSpecDir(path, module string) (string, error) { + if path == "" { + var err error + path, err = os.Getwd() + if err != nil { + return "", errors.New("could not get current directory") + } + } goModFile, err := getGoModFile(path) if err != nil { - return nil, errors.New("could not get go.mod file") + return "", errors.New("could not get go.mod file") } // check if there is a replace @@ -173,7 +190,7 @@ func GetSpecTestJSON(path string, module string) ([]byte, error) { } } if req == nil { - return nil, errors.Errorf("could not find %s module", specModule) + return "", errors.Errorf("could not find %s module", specModule) } modPath = req.Mod.Path modVersion = req.Mod.Version @@ -182,14 +199,14 @@ func GetSpecTestJSON(path string, module string) ([]byte, error) { // get module path p, err := GetModulePath(modPath, modVersion) if err != nil { - return nil, errors.Wrap(err, "could not get module path") + return "", errors.Wrap(err, "could not get module path") } if _, err := os.Stat(p); os.IsNotExist(err) { - return nil, errors.Wrapf(err, "you don't have this module-%s/version-%s installed", modPath, modVersion) + return "", errors.Wrapf(err, "you don't have this 
module-%s/version-%s installed", modPath, modVersion) } - return os.ReadFile(filepath.Join(filepath.Clean(p), filepath.Clean(module), filepath.Clean(specTestPath))) + return filepath.Join(filepath.Clean(p), module), nil } func GetModulePath(name, version string) (string, error) { diff --git a/protocol/v2/types/bls.go b/protocol/v2/types/bls.go index 70d2b7cb0e..d4e2b39fb9 100644 --- a/protocol/v2/types/bls.go +++ b/protocol/v2/types/bls.go @@ -9,7 +9,7 @@ var blsPublicKeyCache *lru.Cache[string, bls.PublicKey] func init() { var err error - blsPublicKeyCache, err = lru.New[string, bls.PublicKey](10_000) + blsPublicKeyCache, err = lru.New[string, bls.PublicKey](128_000) if err != nil { panic(err) } diff --git a/protocol/v2/types/crypto.go b/protocol/v2/types/crypto.go index 24863a64cc..3f08b7ee5b 100644 --- a/protocol/v2/types/crypto.go +++ b/protocol/v2/types/crypto.go @@ -15,13 +15,11 @@ import ( // // TODO: rethink this function and consider moving/refactoring it. func VerifyByOperators(s spectypes.Signature, data spectypes.MessageSignature, domain spectypes.DomainType, sigType spectypes.SignatureType, operators []*spectypes.Operator) error { - // decode sig sign := &bls.Sign{} if err := sign.Deserialize(s); err != nil { return errors.Wrap(err, "failed to deserialize signature") } - // find operators pks := make([]bls.PublicKey, 0) for _, id := range data.GetSigners() { found := false @@ -41,13 +39,11 @@ func VerifyByOperators(s spectypes.Signature, data spectypes.MessageSignature, d } } - // compute root computedRoot, err := spectypes.ComputeSigningRoot(data, spectypes.ComputeSignatureDomain(domain, sigType)) if err != nil { return errors.Wrap(err, "could not compute signing root") } - // verify if res := sign.FastAggregateVerify(pks, computedRoot[:]); !res { return errors.New("failed to verify signature") } @@ -72,7 +68,6 @@ func VerifyReconstructedSignature(sig *bls.Sign, validatorPubKey []byte, root [3 return errors.Wrap(err, "could not deserialize validator 
pk") } - // verify reconstructed sig if res := sig.VerifyByte(&pk, root[:]); !res { return errors.New("could not reconstruct a valid signature") } diff --git a/protocol/v2/types/messages.go b/protocol/v2/types/messages.go index 121194142d..529b2ab821 100644 --- a/protocol/v2/types/messages.go +++ b/protocol/v2/types/messages.go @@ -34,6 +34,7 @@ type EventMsg struct { type TimeoutData struct { Height qbft.Height + Round qbft.Round } type ExecuteDutyData struct { @@ -57,11 +58,11 @@ func (m *EventMsg) GetExecuteDutyData() (*ExecuteDutyData, error) { } // Encode returns a msg encoded bytes or error -func (msg *EventMsg) Encode() ([]byte, error) { - return json.Marshal(msg) +func (m *EventMsg) Encode() ([]byte, error) { + return json.Marshal(m) } // Decode returns error if decoding failed -func (msg *EventMsg) Decode(data []byte) error { - return json.Unmarshal(data, &msg) +func (m *EventMsg) Decode(data []byte) error { + return json.Unmarshal(data, &m) } diff --git a/protocol/v2/types/signature_benchmark_linux_test.go b/protocol/v2/types/signature_benchmark_linux_test.go new file mode 100644 index 0000000000..d3a9b295e2 --- /dev/null +++ b/protocol/v2/types/signature_benchmark_linux_test.go @@ -0,0 +1,70 @@ +//go:build linux + +package types + +import ( + "crypto" + "crypto/sha256" + "fmt" + "testing" + + "github.com/microsoft/go-crypto-openssl/openssl" + "github.com/microsoft/go-crypto-openssl/openssl/bbig/bridge" +) + +func init() { + if err := openssl.Init(); err != nil { + panic(err) + } +} + +func BenchmarkVerifyPKCS1v15OpenSSL(b *testing.B) { + dataOpenSSL := []byte("This is test data for OpenSSL verification.") + hashedOpenSSL := sha256.Sum256(dataOpenSSL) + + priv, pub := newOpenSSLRSAKey(2048) + + sig, err := openssl.SignRSAPKCS1v15(priv, crypto.SHA256, hashedOpenSSL[:]) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := openssl.VerifyRSAPKCS1v15(pub, crypto.SHA256, hashedOpenSSL[:], sig) + if err != nil { + 
b.Fatal(err) + } + } +} + +func BenchmarkSignPKCS1v15OpenSSL(b *testing.B) { + dataOpenSSL := []byte("This is test data for OpenSSL verification.") + hashedOpenSSL := sha256.Sum256(dataOpenSSL) + + priv, _ := newOpenSSLRSAKey(2048) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := openssl.SignRSAPKCS1v15(priv, crypto.SHA256, hashedOpenSSL[:]) + if err != nil { + b.Fatal(err) + } + } +} + +func newOpenSSLRSAKey(size int) (*openssl.PrivateKeyRSA, *openssl.PublicKeyRSA) { + N, E, D, P, Q, Dp, Dq, Qinv, err := bridge.GenerateKeyRSA(size) + if err != nil { + panic(fmt.Sprintf("GenerateKeyRSA(%d): %v", size, err)) + } + priv, err := bridge.NewPrivateKeyRSA(N, E, D, P, Q, Dp, Dq, Qinv) + if err != nil { + panic(fmt.Sprintf("NewPrivateKeyRSA(%d): %v", size, err)) + } + pub, err := bridge.NewPublicKeyRSA(N, E) + if err != nil { + panic(fmt.Sprintf("NewPublicKeyRSA(%d): %v", size, err)) + } + return priv, pub +} diff --git a/protocol/v2/types/signature_benchmark_test.go b/protocol/v2/types/signature_benchmark_test.go new file mode 100644 index 0000000000..074182e10a --- /dev/null +++ b/protocol/v2/types/signature_benchmark_test.go @@ -0,0 +1,180 @@ +package types + +import ( + "crypto" + "crypto/md5" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "testing" + + "github.com/herumi/bls-eth-go-binary/bls" +) + +var ( + privateKey *rsa.PrivateKey + publicKey *rsa.PublicKey + signature []byte + data = []byte("This is some test data for verification.") + hashed = sha256.Sum256(data) +) + +var ( + privateKeyPSS *rsa.PrivateKey + publicKeyPSS *rsa.PublicKey + pssSignature []byte + dataPSS = []byte("This is some test data for PSS verification.") + hashedPSS = sha256.Sum256(dataPSS) +) + +var ( + privateKeyFast *rsa.PrivateKey + publicKeyFast *rsa.PublicKey + signatureFast []byte + dataFast = []byte("This is test data for fast verification.") + hashedFast = md5.Sum(dataFast) +) + +func init() { + var err error + privateKey, err = rsa.GenerateKey(rand.Reader, 2048) + if 
err != nil { + panic(err) + } + publicKey = &privateKey.PublicKey + + signature, err = rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, hashed[:]) + if err != nil { + panic(err) + } + + if err := bls.Init(bls.BLS12_381); err != nil { + panic(err) + } + + if err := bls.SetETHmode(bls.EthModeLatest); err != nil { + panic(err) + } + + privateKeyPSS, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + publicKeyPSS = &privateKeyPSS.PublicKey + + pssOptions := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + } + + pssSignature, err = rsa.SignPSS(rand.Reader, privateKeyPSS, crypto.SHA256, hashedPSS[:], pssOptions) + if err != nil { + panic(err) + } + + privateKeyFast, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + publicKeyFast = &privateKeyFast.PublicKey + + signatureFast, err = rsa.SignPKCS1v15(rand.Reader, privateKeyFast, crypto.MD5, hashedFast[:]) + if err != nil { + panic(err) + } +} + +func BenchmarkVerifyBLS(b *testing.B) { + secKey := new(bls.SecretKey) + secKey.SetByCSPRNG() + pubKey := secKey.GetPublicKey() + msg := []byte("This is some test data for verification.") + sig := secKey.SignByte(msg) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !sig.VerifyByte(pubKey, msg) { + b.Fatal("Verification failed") + } + } +} + +func BenchmarkVerifyPKCS1v15(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, hashed[:], signature) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkVerifyPKCS1v15FastHash(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := rsa.VerifyPKCS1v15(publicKeyFast, crypto.MD5, hashedFast[:], signatureFast) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkVerifyPSS(b *testing.B) { + pssOptions := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := 
rsa.VerifyPSS(publicKeyPSS, crypto.SHA256, hashedPSS[:], pssSignature, pssOptions) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSignBLS(b *testing.B) { + secKey := new(bls.SecretKey) + secKey.SetByCSPRNG() + msg := []byte("This is some test data for verification.") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = secKey.SignByte(msg) + } +} + +func BenchmarkSignPKCS1v15(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, hashed[:]) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSignPKCS1v15FastHash(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := rsa.SignPKCS1v15(rand.Reader, privateKeyFast, crypto.MD5, hashedFast[:]) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSignPSS(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + pssOptions := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + } + + _, err := rsa.SignPSS(rand.Reader, privateKeyPSS, crypto.SHA256, hashedPSS[:], pssOptions) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/registry/storage/shares.go b/registry/storage/shares.go index 17572f0257..321bcd15c8 100644 --- a/registry/storage/shares.go +++ b/registry/storage/shares.go @@ -206,6 +206,13 @@ func ByActiveValidator() SharesFilter { } } +// ByAttesting filters for attesting validators. +func ByAttesting() SharesFilter { + return func(share *types.SSVShare) bool { + return share.HasBeaconMetadata() && share.BeaconMetadata.IsAttesting() + } +} + // ByClusterID filters by cluster id. 
func ByClusterID(clusterID []byte) SharesFilter { return func(share *types.SSVShare) bool { diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml index 641ad31360..2440971fe0 100644 --- a/scripts/spec-alignment/differ.config.yaml +++ b/scripts/spec-alignment/differ.config.yaml @@ -8,7 +8,11 @@ ApprovedChanges: ["256a3dc0f1eb7abf","22b66e9a63ba145b","12c1c3a1622fb7cc","1c44 "db32f358b6e8e2bb","f372e174e1f34c3b","bc47b3d202e8cd0d","86a6abca1a1c16d6","1655d21d5a4cad4","ac4e427097fc5533","6b4d5a114f8066ff", "9482fb9b6a953c48","5778a05e0976a6eb","24e2c7f54d5dd1d","2a8937e50d20faa9","587c629a67ef07ed","9d06d8e0ee4e1113","e624ec802068e711", "943be3ce709a99d3","5b3bb2d2262fe8be","c20c4c7ed8d1711d","b10c6fc7dd9eee7","c121cdaab6c1c698","e12b17f3910be26b","e47bf52e962c90af", - "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f"] + "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f", + "ae1b53fc580ce346","c117bd5db3eeabd6","d06552d71b9ca4cd","4cb333a88af66575","2a580187c312c79a","bf8cf93c55c1eadb","6d877e24991465e4", + "b1c8e0148a4a755","2c25abb7c776bd54","a1754e08473bd1fa","4dbab14670fa155d","2a3667a499a23b16","930379d323dd95e8","65efe31656e8814f", + "1270cef2e573f846","aeafb38ca9114f12","2a83e3384b45f2d7","91fbb874b3ce2570","74ad51ca63526e1e","defd8406641d53a5"] + IgnoredIdentifiers: - logger ReducedPackageNames: diff --git a/utils/keys.go b/utils/keys.go index a237b17258..4287d03e38 100644 --- a/utils/keys.go +++ b/utils/keys.go @@ -25,7 +25,7 @@ func ECDSAPrivateKey(logger *zap.Logger, privateKey string) (*ecdsa.PrivateKey, if err != nil { return nil, errors.WithMessage(err, "failed to unmarshal passed privKey") } - privKey, err = commons.ConvertFromInterfacePrivKey(unmarshalledKey) + privKey, err = commons.ECDSAPrivFromInterface(unmarshalledKey) if err != nil { return 
nil, err } @@ -35,12 +35,12 @@ func ECDSAPrivateKey(logger *zap.Logger, privateKey string) (*ecdsa.PrivateKey, if err != nil { return nil, errors.WithMessage(err, "failed to generate 256k1 key") } - privKey, err = commons.ConvertFromInterfacePrivKey(privInterfaceKey) + privKey, err = commons.ECDSAPrivFromInterface(privInterfaceKey) if err != nil { return nil, err } } - interfacePriv, err := commons.ConvertToInterfacePrivkey(privKey) + interfacePriv, err := commons.ECDSAPrivToInterface(privKey) if err != nil { return nil, err } diff --git a/utils/rsaencryption/testingspace/vars.go b/utils/rsaencryption/testingspace/vars.go index 27a90cc0de..f94a8da859 100644 --- a/utils/rsaencryption/testingspace/vars.go +++ b/utils/rsaencryption/testingspace/vars.go @@ -2,6 +2,7 @@ package testing var ( // SkPem is a operator private key + // #nosec G101 (Potential hardcoded credentials: RSA private key) SkPem = "-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEAowE7OEbwyLkvrZ0TU4jjooyIFxNvgrY8Fj+WslyZTlyj8UDf\nFrYh5Un2u4YMdAe+cPf1XK+A/P9XX7OB4nf1OoGVB6wrC/jhLbvOH650ryUYopeY\nhlSXxGnD4vcvTvcqLLB+ue2/iySxQLpZR/6VsT3fFrEonzFTqnFCwCF28iPnJVBj\nX6T/HcTJ55IDkbtotarU6cwwNOHnHkzWrv7ityPkR4Ge11hmVG9QjROt56ehXfFs\nFo5MqSvqpYplXkI/zUNm8j/lqEdU0RXUr41L2hyKY/pVjsgmeTsN7/ZqACkHye9F\nbkV9V/VbTh7hWVLTqGSh7BY/D7gwOwfuKiq2TwIDAQABAoIBADjO3Qyn7JKHt44S\nCAI82thzkZo5M8uiJx652pMeom8k6h3SNe18XCPEuzBvbzeg20YTpHdA0vtZIeJA\ndSuwEs7pCj86SWZKvm9p3FQ+QHwpuYQwwP9Py/Svx4z6CIrEqPYaLJAvw2mCyCN+\nzk7A8vpqTa1i4H1ae4YTIuhCwWlxe1ttD6rVUYfC2rVaFJ+b8JlzFRq4bnAR8yme\nrE4iAlfgTOj9zL814qRlYQeeZhMvA8T0qWUohbr1imo5XzIJZayLocvqhZEbk0dj\nq9qKWdIpAATRjWvb+7PkjmlwNjLOhJ1phtCkc/S4j2cvo9gcS7WafxaqCl/ix4Yt\n5KvPJ8ECgYEA0Em4nMMEFXbuSM/l5UCzv3kT6H/TYO7FVh071G7QAFoloxJBZDFV\n7fHsc+uCimlG2Xt3CrGo9tsOnF/ZgDKNmtDvvjxmlPnAb5g4uhXgYNMsKQShpeRW\n/ay8CmWbsRqXZaLoI5br2kCTLwsVz2hpabAzBOr2YV3vMRB5i7COYSMCgYEAyFgL\n3DkKwsTTyVyplenoAZaS/o0mKxZnffRnHNP5QgRfT4pQkuogk+MYAeBuGsc4cTi7\nrTtytUMBABXEKGIJkAbNoASHQMUcO1vvcwhBW7Ay+oxuc0JSlnaXjowS0C0o/4qr\nQ
/rpUneir+Vu/N8+6edETRkNj+5unmePEe9NBuUCgYEAgtUr31woHot8FcRxNdW0\nkpstRCe20PZqgjMOt9t7UB1P8uSuqo7K2RHTYuUWNHb4h/ejyNXbumPTA6q5Zmta\nw1pmnWo3TXCrze0iBNFlBazf2kwMdbW+Zs2vuCAm8dIwMylnA6PzNj7FtRETfBqr\nzDVfdsFYTcTBUGJ21qXqaV0CgYEAmuMPEEv9WMTo43VDGsaCeq/Zpvii+I7SphsM\nmMn8m6Bbu1e4oUxmsU7RoanMFeHNbiMpXW1namGJ5XHufDYHJJVN5Zd6pYV+JRoX\njjxkoyke0Hs/bNZqmS7ITwlWBiHT33Rqohzaw8oAObLMUq2ZqyYDtQNYa90vIkH3\n5yq1x00CgYEAs4ztQhGRbeUlqnW6Z6yfRJ6XXYqdMPhxuBxvNn/dxJ10T4W2DUuC\njSdpGXrY+ECYyXUwlXBqbaKx1K5AQD7nmu9J3l0oMkX6tSBj1OE5MabATrsW6wvT\nhkTPJZMyPUYhoBkivPUKyQXswrQV/nUQAsAcLeJShTW4gSs0M6weQAc=\n-----END RSA PRIVATE KEY-----\n" // EncryptedKeyBase64 SkPem in base64 format EncryptedKeyBase64 = "NW/6N5Ubo5T+oiT9My2wXFH5TWT7iQnN8YKUlcoFeg00OzL1S4yKrIPemdr7SM3EbPeHlBtOAM3z+06EmaNlwVdBiexSRJmgnknqwt/Ught4pKZK/WdJAEhMRwjZ3nx1Qi1TYcw7oZBaOdeTdm65QEAnsqOHk1htnUTXqsqYxVF750u8JWq3Mzr3oCN65ydSJRQoSa+lo3DikIDrXSYe1LRY5epMRrOq3cujuykuAVZQWp1vzv4w4V6mffmxaDbPpln/w28FKCxYkxG/WhwGuXR1GK6IWr3xpXPKcG+lzfvlmh4UiK1Lad/YD460oMXOKZT8apn4HL4tl9HOb6RyWQ==" diff --git a/utils/testutils.go b/utils/testutils.go new file mode 100644 index 0000000000..bfd9290b25 --- /dev/null +++ b/utils/testutils.go @@ -0,0 +1,55 @@ +package utils + +import ( + "sync" + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/golang/mock/gomock" + + "github.com/bloxapp/ssv/networkconfig" + mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" +) + +type SlotValue struct { + mu sync.Mutex + slot phase0.Slot +} + +func (sv *SlotValue) SetSlot(s phase0.Slot) { + sv.mu.Lock() + defer sv.mu.Unlock() + sv.slot = s +} + +func (sv *SlotValue) GetSlot() phase0.Slot { + sv.mu.Lock() + defer sv.mu.Unlock() + return sv.slot +} + +func SetupMockBeaconNetwork(t *testing.T, currentSlot *SlotValue) *mocknetwork.MockBeaconNetwork { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + if currentSlot == nil { + currentSlot = &SlotValue{} + currentSlot.SetSlot(32) + } + + mockBeaconNetwork := 
mocknetwork.NewMockBeaconNetwork(ctrl) + mockBeaconNetwork.EXPECT().GetBeaconNetwork().Return(networkconfig.TestNetwork.Beacon.GetBeaconNetwork()).AnyTimes() + + mockBeaconNetwork.EXPECT().EstimatedCurrentSlot().DoAndReturn( + func() phase0.Slot { + return currentSlot.GetSlot() + }, + ).AnyTimes() + mockBeaconNetwork.EXPECT().EstimatedEpochAtSlot(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) phase0.Epoch { + return phase0.Epoch(slot / 32) + }, + ).AnyTimes() + + return mockBeaconNetwork +}