From 08337aa7637b290bb8407c38b2a5dbe3e8383b3e Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Thu, 29 Apr 2021 04:58:30 +0300
Subject: [PATCH 1/9] [sonic-package-manager] first phase implementation of
 sonic-package-manager (#1527)

What I did

Implemented sonic-package-manager utility to manage SONiC Packages as per HLD Azure/SONiC#682.
Implemented optional logic to migrate packages into new SONiC image in sonic-installer.

How I did it

Implemented as per HLD Azure/SONiC#682.

How to verify it

(Doc: Azure/SONiC#682)
- install package
- uninstall package
- upgrade package
- S2S upgrade
---
 doc/Command-Reference.md                      | 318 +++++-
 setup.py                                      |  14 +
 .../bash_completion.d/sonic-package-manager   |   8 +
 sonic-utilities-data/bash_completion.d/spm    |   1 +
 sonic-utilities-data/templates/dump.sh.j2     |  29 +
 .../templates/service_mgmt.sh.j2              | 149 +++
 .../templates/sonic.service.j2                |  39 +
 sonic-utilities-data/templates/timer.unit.j2  |  15 +
 sonic_installer/bootloader/aboot.py           |  11 +-
 sonic_installer/bootloader/bootloader.py      |   5 +-
 sonic_installer/common.py                     |   3 +
 sonic_installer/main.py                       | 127 ++-
 sonic_package_manager/__init__.py             |   5 +
 sonic_package_manager/constraint.py           | 166 ++++
 sonic_package_manager/database.py             | 222 +++++
 sonic_package_manager/dockerapi.py            | 226 +++++
 sonic_package_manager/errors.py               | 146 +++
 sonic_package_manager/logger.py               |  29 +
 sonic_package_manager/main.py                 | 460 +++++++++
 sonic_package_manager/manager.py              | 931 ++++++++++++++++++
 sonic_package_manager/manifest.py             | 210 ++++
 sonic_package_manager/metadata.py             | 185 ++++
 sonic_package_manager/package.py              |  53 +
 sonic_package_manager/progress.py             |  52 +
 sonic_package_manager/reference.py            |  30 +
 sonic_package_manager/registry.py             | 157 +++
 .../service_creator/__init__.py               |   3 +
 .../service_creator/creator.py                | 342 +++++++
 .../service_creator/feature.py                | 108 ++
 .../service_creator/sonic_db.py               |  98 ++
 .../service_creator/utils.py                  |  17 +
 sonic_package_manager/source.py               | 183 ++++
 sonic_package_manager/utils.py                |  42 +
 sonic_package_manager/version.py              |  23 +
 tests/sonic_package_manager/conftest.py       | 377 +++++++
 tests/sonic_package_manager/test_cli.py       |  63 ++
 .../sonic_package_manager/test_constraint.py  |  76 ++
 tests/sonic_package_manager/test_database.py  |  89 ++
 tests/sonic_package_manager/test_manager.py   | 322 ++++++
 tests/sonic_package_manager/test_manifest.py  |  74 ++
 tests/sonic_package_manager/test_metadata.py  |  37 +
 tests/sonic_package_manager/test_reference.py |  18 +
 tests/sonic_package_manager/test_registry.py  |  15 +
 .../test_service_creator.py                   | 171 ++++
 tests/sonic_package_manager/test_utils.py     |   8 +
 45 files changed, 5633 insertions(+), 24 deletions(-)
 create mode 100644 sonic-utilities-data/bash_completion.d/sonic-package-manager
 create mode 120000 sonic-utilities-data/bash_completion.d/spm
 create mode 100644 sonic-utilities-data/templates/dump.sh.j2
 create mode 100644 sonic-utilities-data/templates/service_mgmt.sh.j2
 create mode 100644 sonic-utilities-data/templates/sonic.service.j2
 create mode 100644 sonic-utilities-data/templates/timer.unit.j2
 create mode 100644 sonic_package_manager/__init__.py
 create mode 100644 sonic_package_manager/constraint.py
 create mode 100644 sonic_package_manager/database.py
 create mode 100644 sonic_package_manager/dockerapi.py
 create mode 100644 sonic_package_manager/errors.py
 create mode 100644 sonic_package_manager/logger.py
 create mode 100644 sonic_package_manager/main.py
 create mode 100644 sonic_package_manager/manager.py
 create mode 100644 sonic_package_manager/manifest.py
 create mode 100644 sonic_package_manager/metadata.py
 create mode 100644 sonic_package_manager/package.py
 create mode 100644 sonic_package_manager/progress.py
 create mode 100644 sonic_package_manager/reference.py
 create mode 100644 sonic_package_manager/registry.py
 create mode 100644 sonic_package_manager/service_creator/__init__.py
 create mode 100644 sonic_package_manager/service_creator/creator.py
 create mode 100644 sonic_package_manager/service_creator/feature.py
 create mode 100644 sonic_package_manager/service_creator/sonic_db.py
 create mode 100644 sonic_package_manager/service_creator/utils.py
 create mode 100644 sonic_package_manager/source.py
 create mode 100644 sonic_package_manager/utils.py
 create mode 100644 sonic_package_manager/version.py
 create mode 100644 tests/sonic_package_manager/conftest.py
 create mode 100644 tests/sonic_package_manager/test_cli.py
 create mode 100644 tests/sonic_package_manager/test_constraint.py
 create mode 100644 tests/sonic_package_manager/test_database.py
 create mode 100644 tests/sonic_package_manager/test_manager.py
 create mode 100644 tests/sonic_package_manager/test_manifest.py
 create mode 100644 tests/sonic_package_manager/test_metadata.py
 create mode 100644 tests/sonic_package_manager/test_reference.py
 create mode 100644 tests/sonic_package_manager/test_registry.py
 create mode 100644 tests/sonic_package_manager/test_service_creator.py
 create mode 100644 tests/sonic_package_manager/test_utils.py

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index 6615413255..ab4e28dbdc 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -143,6 +143,7 @@
 * [Watermark Show commands](#watermark-show-commands)
 * [Watermark Config commands](#watermark-config-commands)
 * [Software Installation and Management](#software-installation-and-management)
+  * [SONiC Package Manager](#sonic-package-manager)
   * [SONiC Installer](#sonic-installer)
 * [Troubleshooting Commands](#troubleshooting-commands)
 * [Routing Stack](#routing-stack)
@@ -7961,8 +7962,316 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#waterm

 ## Software Installation and Management

-SONiC software can be installed in two methods, viz, "using sonic-installer tool", "ONIE Installer".
+SONiC images can be installed in one of two ways:
+1. From within a running SONiC image using the `sonic-installer` utility
+2. From the vendor's bootloader (e.g., ONIE, Aboot, etc.)
+
+SONiC packages are available as prebuilt Docker images and are meant to be installed with the *sonic-package-manager* utility.
+
+### SONiC Package Manager
+
+The *sonic-package-manager* is a command line tool to manage (e.g. install, upgrade or uninstall) SONiC Packages.
+
+**sonic-package-manager list**
+
+This command lists all available SONiC packages, their description, installed version and installation status.
+SONiC package status can be *Installed*, *Not Installed* or *Built-In*. "Built-In" status means that a feature is built into the SONiC image and can't be upgraded or uninstalled.
+
+- Usage:
+  ```
+  sonic-package-manager list
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ sonic-package-manager list
+  Name            Repository                   Description                   Version    Status
+  --------------  ---------------------------  ----------------------------  ---------  --------------
+  cpu-report      azure/cpu-report             CPU report package            N/A        Not Installed
+  database        docker-database              SONiC database package        1.0.0      Built-In
+  dhcp-relay      azure/docker-dhcp-relay      SONiC dhcp-relay package      1.0.0      Installed
+  fpm-frr         docker-fpm-frr               SONiC fpm-frr package         1.0.0      Built-In
+  lldp            docker-lldp                  SONiC lldp package            1.0.0      Built-In
+  macsec          docker-macsec                SONiC macsec package          1.0.0      Built-In
+  mgmt-framework  docker-sonic-mgmt-framework  SONiC mgmt-framework package  1.0.0      Built-In
+  nat             docker-nat                   SONiC nat package             1.0.0      Built-In
+  pmon            docker-platform-monitor      SONiC pmon package            1.0.0      Built-In
+  radv            docker-router-advertiser     SONiC radv package            1.0.0      Built-In
+  sflow           docker-sflow                 SONiC sflow package           1.0.0      Built-In
+  snmp            docker-snmp                  SONiC snmp package            1.0.0      Built-In
+  swss            docker-orchagent             SONiC swss package            1.0.0      Built-In
+  syncd           docker-syncd-mlnx            SONiC syncd package           1.0.0      Built-In
+  teamd           docker-teamd                 SONiC teamd package           1.0.0      Built-In
+  telemetry       docker-sonic-telemetry       SONiC telemetry package       1.0.0      Built-In
+  ```
+
+**sonic-package-manager repository add**
+
+This command will add a new repository as a source for SONiC packages to the database. *NOTE*: requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager repository add [OPTIONS] NAME REPOSITORY
+
+    Add a new repository to database.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    --default-reference TEXT  Default installation reference. Can be a tag or
+                              sha256 digest in repository.
+    --description TEXT        Optional package entry description.
+    --help                    Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager repository add \
+    cpu-report azure/sonic-cpu-report --default-reference 1.0.0
+  ```
+
+**sonic-package-manager repository remove**
+
+This command will remove a repository as a source for SONiC packages from the database. The package has to be *Not Installed* in order to be removed from the package database. *NOTE*: requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager repository remove [OPTIONS] NAME
+
+    Remove repository from database.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    --help  Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager repository remove cpu-report
+  ```
+
+**sonic-package-manager install**
+
+This command pulls and installs a package on the SONiC host. *NOTE*: this command requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager install [OPTIONS] [PACKAGE_EXPR]
+
+    Install/Upgrade package using [PACKAGE_EXPR] in format
+    "<name>[=<version>|@<reference>]".
+
+    The repository to pull the package from is resolved by lookup in
+    package database, thus the package has to be added via "sonic-
+    package-manager repository add" command.
+
+    In case when [PACKAGE_EXPR] is a package name "<name>" this command
+    will install or upgrade to a version referenced by "default-
+    reference" in package database.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    --enable                  Set the default state of the feature to enabled
+                              and enable feature right after installation. NOTE:
+                              user needs to execute "config save -y" to make
+                              this setting persistent.
+    --set-owner [local|kube]  Default owner configuration setting for a feature.
+    --from-repository TEXT    Fetch package directly from image registry
+                              repository. NOTE: This argument is mutually
+                              exclusive with arguments: [package_expr,
+                              from_tarball].
+    --from-tarball FILE       Fetch package from saved image tarball. NOTE: This
+                              argument is mutually exclusive with arguments:
+                              [package_expr, from_repository].
+    -f, --force               Force operation by ignoring package dependency
+                              tree and package manifest validation failures.
+    -y, --yes                 Automatically answer yes on prompts.
+    -v, --verbosity LVL       Either CRITICAL, ERROR, WARNING, INFO or DEBUG.
+                              Default is INFO.
+    --skip-host-plugins       Do not install host OS plugins provided by the
+                              package (CLI, etc). NOTE: In case when package
+                              host OS plugins are set as mandatory in package
+                              manifest this option will fail the installation.
+    --allow-downgrade         Allow package downgrade. By default an attempt to
+                              downgrade the package will result in a failure
+                              since downgrade might not be supported by the
+                              package, thus requires explicit request from the
+                              user.
+    --help                    Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install dhcp-relay=1.0.2
+  ```
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install dhcp-relay@latest
+  ```
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install dhcp-relay@sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd
+  ```
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install --from-repository azure/sonic-cpu-report:latest
+  ```
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install --from-tarball sonic-docker-image.gz
+  ```
+
+**sonic-package-manager uninstall**
+
+This command uninstalls a package from the SONiC host. The user needs to stop the feature prior to uninstalling it.
+*NOTE*: this command requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager uninstall [OPTIONS] NAME
+
+    Uninstall package.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    -f, --force          Force operation by ignoring package dependency tree and
+                         package manifest validation failures.
+    -y, --yes            Automatically answer yes on prompts.
+    -v, --verbosity LVL  Either CRITICAL, ERROR, WARNING, INFO or DEBUG. Default
+                         is INFO.
+    --help               Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager uninstall dhcp-relay
+  ```
+
+**sonic-package-manager reset**
+
+This command resets the package by reinstalling it to its default version. *NOTE*: this command requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager reset [OPTIONS] NAME
+
+    Reset package to the default version.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    -f, --force          Force operation by ignoring package dependency tree and
+                         package manifest validation failures.
+    -y, --yes            Automatically answer yes on prompts.
+    -v, --verbosity LVL  Either CRITICAL, ERROR, WARNING, INFO or DEBUG. Default
+                         is INFO.
+    --skip-host-plugins  Do not install host OS plugins provided by the package
+                         (CLI, etc). NOTE: In case when package host OS plugins
+                         are set as mandatory in package manifest this option
+                         will fail the installation.
+    --help               Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager reset dhcp-relay
+  ```
+
+**sonic-package-manager show package versions**
+
+This command will retrieve a list of all available versions for the given package from the configured upstream repository.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager show package versions [OPTIONS] NAME
+
+    Show available versions.
+
+  Options:
+    --all    Show all available tags in repository.
+    --plain  Plain output.
+    --help   Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sonic-package-manager show package versions dhcp-relay
+  • 1.0.0
+  • 1.0.2
+  • 2.0.0
+  ```
+  ```
+  admin@sonic:~$ sonic-package-manager show package versions dhcp-relay --plain
+  1.0.0
+  1.0.2
+  2.0.0
+  ```
+  ```
+  admin@sonic:~$ sonic-package-manager show package versions dhcp-relay --all
+  • 1.0.0
+  • 1.0.2
+  • 2.0.0
+  • latest
+  ```
+
+**sonic-package-manager show package changelog**
+
+This command fetches the changelog from the package manifest and displays it. *NOTE*: the package changelog can be retrieved from the registry or read from an image tarball without installing the package.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager show package changelog [OPTIONS] [PACKAGE_EXPR]
+
+    Show package changelog.
+
+  Options:
+    --from-repository TEXT  Fetch package directly from image registry
+                            repository NOTE: This argument is mutually exclusive
+                            with arguments: [from_tarball, package_expr].
+    --from-tarball FILE     Fetch package from saved image tarball NOTE: This
+                            argument is mutually exclusive with arguments:
+                            [package_expr, from_repository].
+    --help                  Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sonic-package-manager show package changelog dhcp-relay
+  1.0.0:
+
+    • Initial release
+
+        Author (author@email.com) Mon, 25 May 2020 12:25:00 +0300
+  ```
+
+**sonic-package-manager show package manifest**
+
+This command fetches the package manifest and displays it. *NOTE*: the package manifest can be retrieved from the registry or read from an image tarball without installing the package.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager show package manifest [OPTIONS] [PACKAGE_EXPR]
+
+    Show package manifest.
+
+  Options:
+    --from-repository TEXT  Fetch package directly from image registry
+                            repository NOTE: This argument is mutually exclusive
+                            with arguments: [package_expr, from_tarball].
+    --from-tarball FILE     Fetch package from saved image tarball NOTE: This
+                            argument is mutually exclusive with arguments:
+                            [from_repository, package_expr].
+    -v, --verbosity LVL     Either CRITICAL, ERROR, WARNING, INFO or DEBUG
+    --help                  Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sonic-package-manager show package manifest dhcp-relay=2.0.0
+  {
+    "version": "1.0.0",
+    "package": {
+      "version": "2.0.0",
+      "depends": [
+        "database>=1.0.0,<2.0.0"
+      ]
+    },
+    "service": {
+      "name": "dhcp_relay"
+    }
+  }
+  ```

 ### SONiC Installer

 This is a command line tool available as part of the SONiC software; if the device is already running the SONiC software, this tool can be used to install an alternate image in the partition.
@@ -8033,6 +8342,13 @@ This command is used to install a new image on the alternate image partition. T
     Done
 ```

+Installing a new image with sonic-installer keeps the packages installed on the currently running SONiC image and automatically migrates them to the new image. To perform a clean SONiC installation instead, use the *--skip-package-migration* option:
+
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-installer install https://sonic-jenkins.westus.cloudapp.azure.com/job/xxxx/job/buildimage-xxxx-all/xxx/artifact/target/sonic-xxxx.bin --skip-package-migration
+  ```
+
 **sonic-installer set_default**

 This command is used to change the image which can be loaded by default in all the subsequent reboots.
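[Editor's note] The `<name>[=<version>|@<reference>]` package expression documented above is easy to illustrate. A minimal, hypothetical sketch (not part of this patch — the real parsing lives inside `sonic_package_manager`):

```python
def split_package_expr(expr: str):
    """ Split "<name>[=<version>|@<reference>]" into its parts. """
    name, version, reference = expr, None, None
    if '=' in expr:
        name, version = expr.split('=', 1)      # pinned version
    elif '@' in expr:
        name, reference = expr.split('@', 1)    # tag or sha256 digest
    return name, version, reference

assert split_package_expr('dhcp-relay=1.0.2') == ('dhcp-relay', '1.0.2', None)
assert split_package_expr('dhcp-relay@latest') == ('dhcp-relay', None, 'latest')
```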
diff --git a/setup.py b/setup.py
index cd706eb433..15f93b46f7 100644
--- a/setup.py
+++ b/setup.py
@@ -48,6 +48,8 @@
         'show.plugins',
         'sonic_installer',
         'sonic_installer.bootloader',
+        'sonic_package_manager',
+        'sonic_package_manager.service_creator',
         'tests',
         'undebug',
         'utilities_common',
@@ -151,13 +153,21 @@
             'sonic-clear = clear.main:cli',
             'sonic-installer = sonic_installer.main:sonic_installer',
             'sonic_installer = sonic_installer.main:sonic_installer', # Deprecated
+            'sonic-package-manager = sonic_package_manager.main:cli',
+            'spm = sonic_package_manager.main:cli',
             'undebug = undebug.main:cli',
             'watchdogutil = watchdogutil.main:watchdogutil',
         ]
     },
     install_requires=[
         'click==7.0',
+        'click-log==0.3.2',
+        'docker==4.4.4',
+        'docker-image-py==0.1.10',
+        'filelock==3.0.12',
+        'enlighten==1.8.0',
         'ipaddress==1.0.23',
+        'jinja2==2.11.3',
         'jsondiff==1.2.0',
         'jsonpatch==1.32.0',
         'm2crypto==0.31.0',
@@ -165,6 +175,8 @@
         'netaddr==0.8.0',
         'netifaces==0.10.7',
         'pexpect==4.8.0',
+        'poetry-semver==0.1.0',
+        'prettyprinter==0.18.0',
         'pyroute2==0.5.14',
         'requests==2.25.0',
         'sonic-config-engine',
@@ -173,6 +185,7 @@
         'sonic-yang-mgmt',
         'swsssdk>=2.0.1',
         'tabulate==0.8.2',
+        'www-authenticate==0.9.2',
         'xmltodict==0.12.0',
     ],
     setup_requires= [
@@ -180,6 +193,7 @@
         'wheel'
     ],
     tests_require = [
+        'pyfakefs',
         'pytest',
         'mockredispy>=2.9.3',
         'deepdiff==5.2.3'
diff --git a/sonic-utilities-data/bash_completion.d/sonic-package-manager b/sonic-utilities-data/bash_completion.d/sonic-package-manager
new file mode 100644
index 0000000000..a8a2456603
--- /dev/null
+++ b/sonic-utilities-data/bash_completion.d/sonic-package-manager
@@ -0,0 +1,8 @@
+_sonic_package_manager_completion() {
+    COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
+                   COMP_CWORD=$COMP_CWORD \
+                   _SONIC_PACKAGE_MANAGER_COMPLETE=complete $1 ) )
+    return 0
+}
+
+complete -F _sonic_package_manager_completion -o default sonic-package-manager;
diff --git a/sonic-utilities-data/bash_completion.d/spm b/sonic-utilities-data/bash_completion.d/spm
new file mode 120000
index 0000000000..3fff069223
--- /dev/null
+++ b/sonic-utilities-data/bash_completion.d/spm
@@ -0,0 +1 @@
+sonic-package-manager
\ No newline at end of file
diff --git a/sonic-utilities-data/templates/dump.sh.j2 b/sonic-utilities-data/templates/dump.sh.j2
new file mode 100644
index 0000000000..ebb7ed8f24
--- /dev/null
+++ b/sonic-utilities-data/templates/dump.sh.j2
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+#
+# =============== Managed by SONiC Package Manager. DO NOT EDIT! ===============
+# auto-generated from {{ source }} by sonic-package-manager
+#
+
+service="{{ manifest.service.name }}"
+dump_command="{{ manifest.package['debug-dump'] }}"
+container_re="^${service}[0-9]*$"
+{% raw %}
+container_ids="$(docker ps -f name=${container_re} -f status=running --format {{.Names}})"
+{% endraw %}
+tmp_dir=$(mktemp -d)
+tmp_dump_dir="$tmp_dir/$service"
+tmp_archive=$(mktemp)
+
+mkdir -p "$tmp_dump_dir"
+
+for container_id in $container_ids; do
+    docker exec -t "${container_id}" ${dump_command} &> "${tmp_dump_dir}/${container_id}"
+done
+
+
+tar -C $(dirname $tmp_dump_dir) -cf $tmp_archive $service
+
+cat $tmp_archive
+rm $tmp_archive
+rm -rf $tmp_dir
diff --git a/sonic-utilities-data/templates/service_mgmt.sh.j2 b/sonic-utilities-data/templates/service_mgmt.sh.j2
new file mode 100644
index 0000000000..e46ba47380
--- /dev/null
+++ b/sonic-utilities-data/templates/service_mgmt.sh.j2
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+#
+# =============== Managed by SONiC Package Manager. DO NOT EDIT! ===============
+# auto-generated from {{ source }} by sonic-package-manager
+#
+
+SERVICE="{{ manifest.service.name }}"
+NAMESPACE_PREFIX="asic"
+SONIC_DB_CLI="sonic-db-cli"
+TMPDIR="/tmp/"
+DEBUGLOG="${TMPDIR}/${SERVICE}.log"
+[[ ! -z $DEV ]] && DEBUGLOG="${TMPDIR}/${SERVICE}-${DEV}.log"
+[[ ! -z $DEV ]] && NET_NS="${NAMESPACE_PREFIX}${DEV}" # name of the network namespace
+[[ ! -z $DEV ]] && SONIC_DB_CLI="${SONIC_DB_CLI} -n ${NET_NS}"
+
+{%- for service in manifest.service.dependent %}
+{%- if service in multi_instance_services %}
+MULTI_INST_DEPENDENT="${MULTI_INST_DEPENDENT} {{ service }}"
+{%- else %}
+DEPENDENT="${DEPENDENT} {{ service }}"
+{%- endif %}
+{%- endfor %}
+
+# Update dependent list based on other packages requirements
+if [[ -f /etc/sonic/${SERVICE}_dependent ]]; then
+    DEPENDENT="${DEPENDENT} $(cat /etc/sonic/${SERVICE}_dependent)"
+fi
+
+if [[ -f /etc/sonic/${SERVICE}_multi_inst_dependent ]]; then
+    MULTI_INST_DEPENDENT="${MULTI_INST_DEPENDENT} $(cat /etc/sonic/${SERVICE}_multi_inst_dependent)"
+fi
+
+function debug()
+{
+    /usr/bin/logger $1
+    /bin/echo `date` "- $1" >> ${DEBUGLOG}
+}
+
+function check_warm_boot()
+{
+    SYSTEM_WARM_START=`$SONIC_DB_CLI STATE_DB hget "WARM_RESTART_ENABLE_TABLE|system" enable`
+    SERVICE_WARM_START=`$SONIC_DB_CLI STATE_DB hget "WARM_RESTART_ENABLE_TABLE|${SERVICE}" enable`
+    if [[ x"$SYSTEM_WARM_START" == x"true" ]] || [[ x"$SERVICE_WARM_START" == x"true" ]]; then
+        WARM_BOOT="true"
+{#- TODO: restore count validation for SONiC packages #}
+    else
+        WARM_BOOT="false"
+    fi
+}
+
+function check_fast_boot()
+{
+    if [[ $($SONIC_DB_CLI STATE_DB GET "FAST_REBOOT|system") == "1" ]]; then
+        FAST_BOOT="true"
+    else
+        FAST_BOOT="false"
+    fi
+}
+
+function start_dependent_services() {
+    if [[ x"$WARM_BOOT" != x"true" ]]; then
+        for dep in ${DEPENDENT}; do
+            /bin/systemctl start ${dep}
+        done
+        for dep in ${MULTI_INST_DEPENDENT}; do
+            if [[ ! -z $DEV ]]; then
+                /bin/systemctl start ${dep}@$DEV
+            else
+                /bin/systemctl start ${dep}
+            fi
+        done
+    fi
+}
+
+function stop_dependent_services() {
+    if [[ x"$WARM_BOOT" != x"true" ]] && [[ x"$FAST_BOOT" != x"true" ]]; then
+        for dep in ${DEPENDENT}; do
+            /bin/systemctl stop ${dep}
+        done
+        for dep in ${MULTI_INST_DEPENDENT}; do
+            if [[ ! -z $DEV ]]; then
+                /bin/systemctl stop ${dep}@$DEV
+            else
+                /bin/systemctl stop ${dep}
+            fi
+        done
+    fi
+}
+
+function start() {
+    debug "Starting ${SERVICE}$DEV service..."
+
+    # start service docker
+    /usr/bin/${SERVICE}.sh start $DEV
+    debug "Started ${SERVICE}$DEV service..."
+
+{%- if manifest.service["post-start-action"] %}
+    docker exec -t ${SERVICE}${DEV} {{ manifest.service["post-start-action"] }}
+{%- endif %}
+}
+
+function wait() {
+    start_dependent_services
+
+    if [[ ! -z $DEV ]]; then
+        /usr/bin/${SERVICE}.sh wait $DEV
+    else
+        /usr/bin/${SERVICE}.sh wait
+    fi
+}
+
+function stop() {
+    debug "Stopping ${SERVICE}$DEV service..."
+
+{%- if manifest.service["pre-shutdown-action"] %}
+    docker exec -t ${SERVICE}${DEV} {{ manifest.service["pre-shutdown-action"] }}
+{%- endif %}
+
+    # For WARM/FAST boot do not perform service stop
+    if [[ x"$WARM_BOOT" != x"true" ]] && [[ x"$FAST_BOOT" != x"true" ]]; then
+        /usr/bin/${SERVICE}.sh stop $DEV
+    else
+        docker kill ${SERVICE}$DEV &> /dev/null || debug "Docker ${SERVICE}$DEV is not running ($?) ..."
+    fi
+
+    debug "Stopped ${SERVICE}$DEV service..."
+
+    stop_dependent_services
+}
+
+OP=$1
+DEV=$2
+
+check_warm_boot
+check_fast_boot
+
+debug "Fast boot flag: ${SERVICE}$DEV ${FAST_BOOT}."
+debug "Warm boot flag: ${SERVICE}$DEV ${WARM_BOOT}."
+
+case "$OP" in
+    start|wait|stop)
+        $1
+        ;;
+    *)
+        echo "Usage: $0 {start|wait|stop}"
+        exit 1
+        ;;
+esac
diff --git a/sonic-utilities-data/templates/sonic.service.j2 b/sonic-utilities-data/templates/sonic.service.j2
new file mode 100644
index 0000000000..72d6ab698c
--- /dev/null
+++ b/sonic-utilities-data/templates/sonic.service.j2
@@ -0,0 +1,39 @@
+#
+# =============== Managed by SONiC Package Manager. DO NOT EDIT! ===============
+# auto-generated from {{ source }} by sonic-package-manager
+#
+{%- set path = '/usr/local/bin' %}
+{%- set multi_instance = multi_instance|default(False) %}
+{%- set multi_instance_services = multi_instance_services|default([]) %}
+[Unit]
+Description={{ manifest.service.name }} container
+{%- for service in manifest.service.requires %}
+Requires={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service
+{%- endfor %}
+{%- for service in manifest.service.requisite %}
+Requisite={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service
+{%- endfor %}
+{%- for service in manifest.service.after %}
+After={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service
+{%- endfor %}
+{%- for service in manifest.service.before %}
+Before={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service
+{%- endfor %}
+BindsTo=sonic.target
+After=sonic.target
+StartLimitIntervalSec=1200
+StartLimitBurst=3
+
+[Service]
+ExecStartPre={{path}}/{{manifest.service.name}}.sh start{% if multi_instance %} %i{% endif %}
+ExecStart={{path}}/{{manifest.service.name}}.sh wait{% if multi_instance %} %i{% endif %}
+ExecStop={{path}}/{{manifest.service.name}}.sh stop{% if multi_instance %} %i{% endif %}
+RestartSec=30
+
+{%- if not manifest.service.delayed %}
+[Install]
+WantedBy=sonic.target
+{%- for service in manifest.service["wanted-by"] %}
+WantedBy={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service
+{%- endfor %}
+{%- endif %}
diff --git a/sonic-utilities-data/templates/timer.unit.j2 b/sonic-utilities-data/templates/timer.unit.j2
new file mode 100644
index 0000000000..a757b8deb8
--- /dev/null
+++ b/sonic-utilities-data/templates/timer.unit.j2
@@ -0,0 +1,15 @@
+#
+# =============== Managed by SONiC Package Manager. DO NOT EDIT! ===============
+# auto-generated from {{ source }} by sonic-package-manager
+#
+[Unit]
+Description=Delays {{ manifest.service.name }} until SONiC has started
+PartOf={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service
+
+[Timer]
+OnUnitActiveSec=0 sec
+OnBootSec=3min 30 sec
+Unit={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service
+
+[Install]
+WantedBy=timers.target sonic.target
diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py
index 3bf3e297e7..a2ef2acf4f 100644
--- a/sonic_installer/bootloader/aboot.py
+++ b/sonic_installer/bootloader/aboot.py
@@ -19,7 +19,6 @@
     HOST_PATH,
     IMAGE_DIR_PREFIX,
     IMAGE_PREFIX,
-    ROOTFS_NAME,
     run_command,
     run_command_or_raise,
 )
@@ -189,14 +188,14 @@ def _get_swi_file_offset(self, swipath, filename):
             return f._fileobj.tell() # pylint: disable=protected-access

     @contextmanager
-    def get_rootfs_path(self, image_path):
-        rootfs_path = os.path.join(image_path, ROOTFS_NAME)
-        if os.path.exists(rootfs_path) and not isSecureboot():
-            yield rootfs_path
+    def get_path_in_image(self, image_path, path):
+        path_in_image = os.path.join(image_path, path)
+        if os.path.exists(path_in_image) and not isSecureboot():
+            yield path_in_image
             return

         swipath = os.path.join(image_path, DEFAULT_SWI_IMAGE)
-        offset = self._get_swi_file_offset(swipath, ROOTFS_NAME)
+        offset = self._get_swi_file_offset(swipath, path)
         loopdev = subprocess.check_output(['losetup', '-f']).decode('utf8').rstrip()

         try:
diff --git a/sonic_installer/bootloader/bootloader.py b/sonic_installer/bootloader/bootloader.py
index b59c9edccd..a6694977ae 100644
--- a/sonic_installer/bootloader/bootloader.py
+++ b/sonic_installer/bootloader/bootloader.py
@@ -9,7 +9,6 @@
     HOST_PATH,
     IMAGE_DIR_PREFIX,
     IMAGE_PREFIX,
-    ROOTFS_NAME,
 )

 class Bootloader(object):
@@ -71,6 +70,6 @@ def get_image_path(cls, image):
         return image.replace(IMAGE_PREFIX, prefix)

     @contextmanager
-    def get_rootfs_path(self, image_path):
+    def get_path_in_image(self, image_path, path_in_image):
         """returns the path to the squashfs"""
-        yield path.join(image_path, ROOTFS_NAME)
+        yield path.join(image_path, path_in_image)
diff --git a/sonic_installer/common.py b/sonic_installer/common.py
index c49aaac032..ac1416789f 100644
--- a/sonic_installer/common.py
+++ b/sonic_installer/common.py
@@ -14,6 +14,9 @@
 IMAGE_PREFIX = 'SONiC-OS-'
 IMAGE_DIR_PREFIX = 'image-'
 ROOTFS_NAME = 'fs.squashfs'
+UPPERDIR_NAME = 'rw'
+WORKDIR_NAME = 'work'
+DOCKERDIR_NAME = 'docker'

 # Run bash command and print output to stdout
 def run_command(command):
diff --git a/sonic_installer/main.py b/sonic_installer/main.py
index 92ad7677f4..12a2ab7e0e 100644
--- a/sonic_installer/main.py
+++ b/sonic_installer/main.py
@@ -1,4 +1,5 @@
 import configparser
+import contextlib
 import os
 import re
 import subprocess
@@ -11,7 +12,14 @@
 from swsscommon.swsscommon import SonicV2Connector

 from .bootloader import get_bootloader
-from .common import run_command, run_command_or_raise, IMAGE_PREFIX
+from .common import (
+    run_command, run_command_or_raise,
+    IMAGE_PREFIX,
+    ROOTFS_NAME,
+    UPPERDIR_NAME,
+    WORKDIR_NAME,
+    DOCKERDIR_NAME,
+)
 from .exception import SonicRuntimeException

 SYSLOG_IDENTIFIER = "sonic-installer"
@@ -218,17 +226,48 @@ def print_deprecation_warning(deprecated_cmd_or_subcmd, new_cmd_or_subcmd):
                 fg="red", err=True)
     click.secho("Please use '{}' instead".format(new_cmd_or_subcmd), fg="red", err=True)

-def update_sonic_environment(click, bootloader, binary_image_version):
+
+def mount_squash_fs(squashfs_path, mount_point):
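+    """ Mount the squashfs at squashfs_path onto mount_point. """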
+    run_command_or_raise(["mkdir", "-p", mount_point])
+    run_command_or_raise(["mount", "-t", "squashfs", squashfs_path, mount_point])
+
+
+def umount(mount_point, read_only=True, recursive=False, force=True, remove_dir=True):
+    flags = []
+    if read_only:
+        flags.append("-r")
+    if force:
+        flags.append("-f")
+    if recursive:
+        flags.append("-R")
+    run_command_or_raise(["umount", *flags, mount_point])
+    if remove_dir:
+        run_command_or_raise(["rm", "-rf", mount_point])
+
+
+def mount_overlay_fs(lowerdir, upperdir, workdir, mount_point):
+    run_command_or_raise(["mkdir", "-p", mount_point])
+    overlay_options = "rw,relatime,lowerdir={},upperdir={},workdir={}".format(lowerdir, upperdir, workdir)
+    run_command_or_raise(["mount", "overlay", "-t", "overlay", "-o", overlay_options, mount_point])
+
+
+def mount_bind(source, mount_point):
+    run_command_or_raise(["mkdir", "-p", mount_point])
+    run_command_or_raise(["mount", "--bind", source, mount_point])
+
+
+def mount_procfs_chroot(root):
+    run_command_or_raise(["chroot", root, "mount", "proc", "/proc", "-t", "proc"])
+
+
+def mount_sysfs_chroot(root):
+    run_command_or_raise(["chroot", root, "mount", "sysfs", "/sys", "-t", "sysfs"])
+
+
+def update_sonic_environment(bootloader, binary_image_version):
     """Prepare sonic environment variable using incoming image template file.
     If incoming image template does not exist use current image template file.
     """
-    def mount_next_image_fs(squashfs_path, mount_point):
-        run_command_or_raise(["mkdir", "-p", mount_point])
-        run_command_or_raise(["mount", "-t", "squashfs", squashfs_path, mount_point])
-
-    def umount_next_image_fs(mount_point):
-        run_command_or_raise(["umount", "-rf", mount_point])
-        run_command_or_raise(["rm", "-rf", mount_point])

     SONIC_ENV_TEMPLATE_FILE = os.path.join("usr", "share", "sonic", "templates", "sonic-environment.j2")
     SONIC_VERSION_YML_FILE = os.path.join("etc", "sonic", "sonic_version.yml")
@@ -239,9 +278,9 @@ def umount_next_image_fs(mount_point):
     env_dir = os.path.join(new_image_dir, "sonic-config")
     env_file = os.path.join(env_dir, "sonic-environment")

-    with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path:
+    with bootloader.get_path_in_image(new_image_dir, ROOTFS_NAME) as new_image_squashfs_path:
         try:
-            mount_next_image_fs(new_image_squashfs_path, new_image_mount)
+            mount_squash_fs(new_image_squashfs_path, new_image_mount)

             next_sonic_env_template_file = os.path.join(new_image_mount, SONIC_ENV_TEMPLATE_FILE)
             next_sonic_version_yml_file = os.path.join(new_image_mount, SONIC_VERSION_YML_FILE)
@@ -264,7 +303,62 @@ def umount_next_image_fs(mount_point):
             os.remove(env_file)
             os.rmdir(env_dir)
         finally:
-            umount_next_image_fs(new_image_mount)
+            umount(new_image_mount)
+
+
+def migrate_sonic_packages(bootloader, binary_image_version):
+    """ Migrate SONiC packages to new SONiC image.
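+
+    The new image rootfs is mounted with a writable overlay, dockerd from
+    the new image is started inside a chroot, and "sonic-package-manager
+    migrate" is executed there against the current packages.json.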
""" + + SONIC_PACKAGE_MANAGER = "sonic-package-manager" + PACKAGE_MANAGER_DIR = "/var/lib/sonic-package-manager/" + DOCKER_CTL_SCRIPT = "/usr/lib/docker/docker.sh" + DOCKERD_SOCK = "docker.sock" + VAR_RUN_PATH = "/var/run/" + + tmp_dir = "tmp" + packages_file = "packages.json" + packages_path = os.path.join(PACKAGE_MANAGER_DIR, packages_file) + sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version) + new_image_dir = bootloader.get_image_path(binary_image_version) + + with contextlib.ExitStack() as stack: + def get_path(path): + """ Closure to get path by entering + a context manager of bootloader.get_path_in_image """ + + return stack.enter_context(bootloader.get_path_in_image(new_image_dir, path)) + + new_image_squashfs_path = get_path(ROOTFS_NAME) + new_image_upper_dir = get_path(UPPERDIR_NAME) + new_image_work_dir = get_path(WORKDIR_NAME) + new_image_docker_dir = get_path(DOCKERDIR_NAME) + new_image_mount = os.path.join("/", tmp_dir, "image-{0}-fs".format(sonic_version)) + new_image_docker_mount = os.path.join(new_image_mount, "var", "lib", "docker") + + try: + mount_squash_fs(new_image_squashfs_path, new_image_mount) + # make sure upper dir and work dir exist + run_command_or_raise(["mkdir", "-p", new_image_upper_dir]) + run_command_or_raise(["mkdir", "-p", new_image_work_dir]) + mount_overlay_fs(new_image_mount, new_image_upper_dir, new_image_work_dir, new_image_mount) + mount_bind(new_image_docker_dir, new_image_docker_mount) + mount_procfs_chroot(new_image_mount) + mount_sysfs_chroot(new_image_mount) + run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "start"]) + run_command_or_raise(["cp", packages_path, os.path.join(new_image_mount, tmp_dir, packages_file)]) + run_command_or_raise(["touch", os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)]) + run_command_or_raise(["mount", "--bind", + os.path.join(VAR_RUN_PATH, DOCKERD_SOCK), + os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)]) + run_command_or_raise(["chroot", new_image_mount, SONIC_PACKAGE_MANAGER, "migrate", + os.path.join("/", tmp_dir, packages_file), + "--dockerd-socket", os.path.join("/", tmp_dir, DOCKERD_SOCK), + "-y"]) + finally: + run_command("chroot {} {} stop".format(new_image_mount, DOCKER_CTL_SCRIPT)) + umount(new_image_mount, recursive=True, read_only=False, remove_dir=False) + umount(new_image_mount) + # Main entrypoint @click.group(cls=AliasedGroup) @@ -286,8 +380,10 @@ def sonic_installer(): help="Force installation of an image of a type which differs from that of the current running image") @click.option('--skip_migration', is_flag=True, help="Do not migrate current configuration to the newly installed image") +@click.option('--skip-package-migration', is_flag=True, + help="Do not migrate current packages to the newly installed image") @click.argument('url') -def install(url, force, skip_migration=False): +def install(url, force, skip_migration=False, skip_package_migration=False): """ Install image from local binary or URL""" bootloader = get_bootloader() @@ -331,7 +427,10 @@ def install(url, force, skip_migration=False): else: run_command('config-setup backup') - update_sonic_environment(click, bootloader, binary_image_version) + update_sonic_environment(bootloader, binary_image_version) + + if not skip_package_migration: + migrate_sonic_packages(bootloader, binary_image_version) # Finally, sync filesystem run_command("sync;sync;sync") diff --git a/sonic_package_manager/__init__.py b/sonic_package_manager/__init__.py new file mode 100644 index 0000000000..9d8827c5e4 --- /dev/null 
+++ b/sonic_package_manager/__init__.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+
+from sonic_package_manager.manager import PackageManager
+
+__all__ = ['PackageManager']
diff --git a/sonic_package_manager/constraint.py b/sonic_package_manager/constraint.py
new file mode 100644
index 0000000000..af5a13000b
--- /dev/null
+++ b/sonic_package_manager/constraint.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+
+""" Package version constraints module. """
+
+import re
+from abc import ABC
+from dataclasses import dataclass, field
+from typing import Dict, Union
+
+import semver
+
+
+class VersionConstraint(semver.VersionConstraint, ABC):
+    """ Extends VersionConstraint from semver package. """
+
+    @staticmethod
+    def parse(constraint_expression: str) -> 'VersionConstraint':
+        """ Parse version constraint.
+
+        Args:
+            constraint_expression: Expression syntax: "[[op][version]]+".
+        Returns:
+            The resulting VersionConstraint object.
+        """
+
+        return semver.parse_constraint(constraint_expression)
+
+
+@dataclass
+class ComponentConstraints:
+    """ ComponentConstraints is a set of components version constraints. """
+
+    components: Dict[str, VersionConstraint] = field(default_factory=dict)
+
+    @staticmethod
+    def parse(constraints: Dict) -> 'ComponentConstraints':
+        """ Parse constraint from dictionary.
+
+        Args:
+            constraints: dictionary with component name
+            as key and constraint expression as value
+
+        Returns:
+            ComponentConstraints object.
+
+        """
+
+        components = {component: VersionConstraint.parse(version)
+                      for component, version in constraints.items()}
+        return ComponentConstraints(components)
+
+    def deparse(self) -> Dict[str, str]:
+        """ Returns the manifest representation of components constraints.
+
+        Returns:
+            Dictionary of string keys and string values.
+
+        """
+
+        return {
+            component: str(version) for component, version in self.components.items()
+        }
+
+
+@dataclass
+class PackageConstraint:
+    """ PackageConstraint is a package version constraint. """
+
+    name: str
+    constraint: VersionConstraint
+    _components: ComponentConstraints = ComponentConstraints({})
+
+    def __str__(self): return f'{self.name}{self.constraint}'
+
+    @property
+    def components(self): return self._components.components
+
+    @staticmethod
+    def from_string(constraint_expression: str) -> 'PackageConstraint':
+        """ Parse package constraint string which contains a package
+        name separated by a space with zero, one or more version constraint
+        expressions. A variety of version matching operators are supported
+        including >, <, ==, !=, ^, *. See Examples.
+
+        Args:
+            constraint_expression: Expression syntax "[package name] [[op][version]]+".
+
+        Returns:
+            PackageConstraint object.
+
+        Examples:
+            >>> PackageConstraint.parse('syncd^1.0.0').constraint
+            <VersionRange (>=1.0.0,<2.0.0)>
+            >>> PackageConstraint.parse('swss>1.3.2 <4.2.1').constraint
+            <VersionRange (>1.3.2,<4.2.1)>
+            >>> PackageConstraint.parse('swss').constraint
+            <VersionRange (*)>
+        """
+
+        REQUIREMENT_SPECIFIER_RE = \
+            r'(?P<name>[A-Za-z0-9_-]+)(?P<constraint>.*)'
+
+        match = re.match(REQUIREMENT_SPECIFIER_RE, constraint_expression)
+        if match is None:
+            raise ValueError(f'Invalid constraint {constraint_expression}')
+        groupdict = match.groupdict()
+        name = groupdict.get('name')
+        constraint = groupdict.get('constraint') or '*'
+        return PackageConstraint(name, VersionConstraint.parse(constraint))
+
+    @staticmethod
+    def from_dict(constraint_dict: Dict) -> 'PackageConstraint':
+        """ Parse package constraint information from dictionary. E.g.:
+
+        {
+            "name": "swss",
+            "version": "^1.0.0",
+            "components": {
+                "libswsscommon": "^1.0.0"
+            }
+        }
+
+        Args:
+            constraint_dict: Dictionary of constraint information.
+
+        Returns:
+            PackageConstraint object.
+        """
+
+        name = constraint_dict['name']
+        version = VersionConstraint.parse(constraint_dict.get('version') or '*')
+        components = ComponentConstraints.parse(constraint_dict.get('components', {}))
+        return PackageConstraint(name, version, components)
+
+    @staticmethod
+    def parse(constraint: Union[str, Dict]) -> 'PackageConstraint':
+        """ Parse constraint from string expression or dictionary.
+
+        Args:
+            constraint: string or dictionary. Check from_string() and from_dict() methods.
+
+        Returns:
+            PackageConstraint object.
+
+        """
+
+        if type(constraint) is str:
+            return PackageConstraint.from_string(constraint)
+        elif type(constraint) is dict:
+            return PackageConstraint.from_dict(constraint)
+        else:
+            raise ValueError('Input argument should be either str or dict')
+
+    def deparse(self) -> Dict:
+        """ Returns the manifest representation of package constraint.
+
+        Returns:
+            Dictionary in manifest representation.
+
+        """
+
+        return {
+            'name': self.name,
+            'version': str(self.constraint),
+            'components': self._components.deparse(),
+        }
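[Editor's note] The constraint grammar above maps onto the pinned `poetry-semver==0.1.0` dependency. A rough, hedged sketch of how such expressions behave (assuming poetry-semver's top-level `parse_constraint`/`Version` API; not part of this patch):

```python
from semver import Version, parse_constraint

caret = parse_constraint('^1.0.0')             # caret: >=1.0.0,<2.0.0
print(caret.allows(Version.parse('1.5.0')))    # True
print(caret.allows(Version.parse('2.0.0')))    # False

rng = parse_constraint('>1.3.2 <4.2.1')        # space-separated constraints intersect
print(rng.allows(Version.parse('4.0.0')))      # True
```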
+ """ + + name: str + repository: Optional[str] + description: Optional[str] = None + default_reference: Optional[str] = None + version: Optional[Version] = None + installed: bool = False + built_in: bool = False + image_id: Optional[str] = None + + +def package_from_dict(name: str, package_info: Dict) -> PackageEntry: + """ Parse dictionary into PackageEntry object.""" + + repository = package_info.get('repository') + description = package_info.get('description') + default_reference = package_info.get('default-reference') + version = package_info.get('installed-version') + if version: + version = Version.parse(version) + installed = package_info.get('installed', False) + built_in = package_info.get('built-in', False) + image_id = package_info.get('image-id') + + return PackageEntry(name, repository, description, + default_reference, version, installed, + built_in, image_id) + + +def package_to_dict(package: PackageEntry) -> Dict: + """ Serialize package into dictionary. """ + + return { + 'repository': package.repository, + 'description': package.description, + 'default-reference': package.default_reference, + 'installed-version': None if package.version is None else str(package.version), + 'installed': package.installed, + 'built-in': package.built_in, + 'image-id': package.image_id, + } + + +class PackageDatabase: + """ An interface to SONiC repository database """ + + def __init__(self, + database: Dict[str, PackageEntry], + on_save: Optional[Callable] = None): + """ Initialize PackageDatabase. + + Args: + database: Database dictionary + on_save: Optional callback to execute on commit() + """ + + self._database = database + self._on_save = on_save + + def add_package(self, + name: str, + repository: str, + description: Optional[str] = None, + default_reference: Optional[str] = None): + """ Adds a new package entry in database. + + Args: + name: Package name. + repository: Repository URL. + description: Description string. + default_reference: Default version string. + + Raises: + PackageAlreadyExistsError: if package already exists in database. + """ + + if self.has_package(name): + raise PackageAlreadyExistsError(name) + + package = PackageEntry(name, repository, description, default_reference) + self._database[name] = package + + def remove_package(self, name: str): + """ Removes package entry from database. + + Args: + name: repository name. + Raises: + PackageNotFoundError: Raises when package with the given name does not exist + in the database. + """ + + pkg = self.get_package(name) + + if pkg.built_in: + raise PackageManagerError(f'Package {name} is built-in, cannot remove it') + + if pkg.installed: + raise PackageManagerError(f'Package {name} is installed, uninstall it first') + + self._database.pop(name) + + def update_package(self, pkg: PackageEntry): + """ Modify repository in the database. + + Args: + pkg: Repository object. + Raises: + PackageManagerError: Raises when repository with the given name does not exist + in the database. + """ + + name = pkg.name + + if not self.has_package(name): + raise PackageNotFoundError(name) + + self._database[name] = pkg + + def get_package(self, name: str) -> PackageEntry: + """ Return a package referenced by name. + If the package is not found PackageNotFoundError is thrown. + + Args: + name: Package name. + Returns: + PackageInfo object. + Raises: + PackageNotFoundError: When package called name was not found. 
+ """ + + try: + pkg = self._database[name] + except KeyError: + raise PackageNotFoundError(name) + + return replace(pkg) + + def has_package(self, name: str) -> bool: + """ Checks if the database contains an entry for a package. + called name. Returns True if the package exists, otherwise False. + + Args: + name: Package name. + Returns: + True if the package exists, otherwise False. + """ + + try: + self.get_package(name) + return True + except PackageNotFoundError: + return False + + def __iter__(self): + """ Iterates over packages in the database. + + Yields: + PackageInfo object. + """ + + for name, _ in self._database.items(): + yield self.get_package(name) + + @staticmethod + def from_file(db_file=PACKAGE_MANAGER_DB_FILE_PATH) -> 'PackageDatabase': + """ Read database content from file. """ + + def on_save(database): + with open(db_file, 'w') as db: + db_content = {} + for name, package in database.items(): + db_content[name] = package_to_dict(package) + json.dump(db_content, db, indent=4) + + database = {} + with open(db_file) as db: + db_content = json.load(db) + for key in db_content: + package = package_from_dict(key, db_content[key]) + database[key] = package + return PackageDatabase(database, on_save) + + def commit(self): + """ Save database content to file. """ + + if self._on_save: + self._on_save(self._database) diff --git a/sonic_package_manager/dockerapi.py b/sonic_package_manager/dockerapi.py new file mode 100644 index 0000000000..926600d0bc --- /dev/null +++ b/sonic_package_manager/dockerapi.py @@ -0,0 +1,226 @@ +#!/usr/bin/evn python + +""" Module provides Docker interface. """ + +import contextlib +import io +import tarfile +import re +from typing import Optional + +from sonic_package_manager.logger import log +from sonic_package_manager.progress import ProgressManager + + +def is_digest(ref: str): + return ref.startswith('sha256:') + + +def bytes_to_mb(bytes): + return bytes / 1024 / 1024 + + +def get_id(line): + return line['id'] + + +def get_status(line): + return line['status'] + + +def get_progress(line): + progress = line['progressDetail'] + current = bytes_to_mb(progress['current']) + total = bytes_to_mb(progress['total']) + return current, total + + +def process_progress(progress_manager, line): + try: + status = get_status(line) + id = get_id(line) + current, total = get_progress(line) + + if id not in progress_manager: + progress_manager.new(id, + total=total, + unit='Mb', + desc=f'{status} {id}') + pbar = progress_manager.get(id) + + # Complete status + if 'complete' in status: + pbar.desc = f'{status} {id}' + pbar.update(pbar.total) + return + + # Status changed + if status not in pbar.desc: + pbar.desc = f'{status} {id}' + pbar.total = total + pbar.count = 0 + + pbar.update(current - pbar.count) + except KeyError: + # not a progress line + return + + +def get_repository_from_image(image): + """ Returns the first RepoTag repository + found in image. """ + + repotags = image.attrs['RepoTags'] + for repotag in repotags: + repository, tag = repotag.split(':') + return repository + + +class DockerApi: + """ DockerApi provides a set of methods - + wrappers around docker client methods """ + + def __init__(self, + client, + progress_manager: Optional[ProgressManager] = None): + self.client = client + self.progress_manager = progress_manager + + def pull(self, repository: str, + reference: Optional[str] = None): + """ Docker 'pull' command. 
+
+        Args:
+            repository: repository to pull
+            reference: tag or digest
+        """
+
+        log.debug(f'pulling image from {repository} reference={reference}')
+
+        api = self.client.api
+        progress_manager = self.progress_manager
+
+        digest = None
+
+        with progress_manager or contextlib.nullcontext():
+            for line in api.pull(repository,
+                                 reference,
+                                 stream=True,
+                                 decode=True):
+                log.debug(f'pull status: {line}')
+
+                status = get_status(line)
+
+                # Record pulled digest
+                digest_match = re.match(r'Digest: (?P<sha>.*)', status)
+                if digest_match:
+                    digest = digest_match.groupdict()['sha']
+
+                if progress_manager:
+                    process_progress(progress_manager, line)
+
+        log.debug(f'Digest: {digest}')
+        log.debug(f'image from {repository} reference={reference} pulled successfully')
+
+        return self.get_image(f'{repository}@{digest}')
+
+    def load(self, imgpath: str):
+        """ Docker 'load' command.
+
+        Args:
+            imgpath: Path to the saved image tarball.
+        """
+
+        log.debug(f'loading image from {imgpath}')
+
+        api = self.client.api
+        progress_manager = self.progress_manager
+
+        imageid = None
+        repotag = None
+
+        with progress_manager or contextlib.nullcontext():
+            with open(imgpath, 'rb') as imagefile:
+                for line in api.load_image(imagefile, quiet=False):
+                    log.debug(f'pull status: {line}')
+
+                    if progress_manager:
+                        process_progress(progress_manager, line)
+
+                    if 'stream' not in line:
+                        continue
+
+                    stream = line['stream']
+                    repotag_match = re.match(r'Loaded image: (?P<repotag>.*)\n', stream)
+                    if repotag_match:
+                        repotag = repotag_match.groupdict()['repotag']
+                    imageid_match = re.match(r'Loaded image ID: sha256:(?P<id>.*)\n', stream)
+                    if imageid_match:
+                        imageid = imageid_match.groupdict()['id']
+
+        imagename = repotag if repotag else imageid
+        log.debug(f'Loaded image {imagename}')
+
+        return self.get_image(imagename)
+
+    def rmi(self, image: str, **kwargs):
+        """ Docker 'rmi -f' command. """
+
+        log.debug(f'removing image {image} kwargs={kwargs}')
+
+        self.client.images.remove(image, **kwargs)
+
+        log.debug(f'image {image} removed successfully')
+
+    def tag(self, image: str, repotag: str, **kwargs):
+        """ Docker 'tag' command """
+
+        log.debug(f'tagging image {image} {repotag} kwargs={kwargs}')
+
+        img = self.client.images.get(image)
+        img.tag(repotag, **kwargs)
+
+        log.debug(f'image {image} tagged {repotag} successfully')
+
+    def rm(self, container: str, **kwargs):
+        """ Docker 'rm' command. """
+
+        self.client.containers.get(container).remove(**kwargs)
+        log.debug(f'removed container {container}')
+
+    def ps(self, **kwargs):
+        """ Docker 'ps' command. """
+
+        return self.client.containers.list(**kwargs)
+
+    def labels(self, image: str):
+        """ Returns a list of labels associated with image. """
+
+        log.debug(f'inspecting image labels {image}')
+
+        labels = self.client.images.get(image).labels
+
+        log.debug(f'image {image} labels retrieved successfully: {labels}')
+        return labels
+
+    def get_image(self, name: str):
+        return self.client.images.get(name)
+
+    def extract(self, image, src_path: str, dst_path: str):
+        """ Copy src_path from the docker image to host dst_path.
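+
+        A temporary container is created for the image and the file is
+        fetched with get_archive(), which returns a tar stream; the tar
+        member is then extracted to dst_path (or into it, when dst_path
+        ends with '/').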
""" + + buf = bytes() + + container = self.client.containers.create(image) + try: + bits, _ = container.get_archive(src_path) + for chunk in bits: + buf += chunk + finally: + container.remove(force=True) + + with tarfile.open(fileobj=io.BytesIO(buf)) as tar: + for member in tar: + if dst_path.endswith('/'): + tar.extract(member, dst_path) + else: + member.name = dst_path + tar.extract(member, dst_path) diff --git a/sonic_package_manager/errors.py b/sonic_package_manager/errors.py new file mode 100644 index 0000000000..17279c52c4 --- /dev/null +++ b/sonic_package_manager/errors.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python + +""" SONiC Package Manager exceptions are defined in this module. """ + +from dataclasses import dataclass +from typing import Optional + +from sonic_package_manager.constraint import PackageConstraint, VersionConstraint +from sonic_package_manager.version import Version + + +class PackageManagerError(Exception): + """ Base class for exceptions generated by SONiC package manager """ + + pass + + +class ManifestError(Exception): + """ Class for manifest validate failures. """ + + pass + + +class MetadataError(Exception): + """ Class for metadata failures. """ + + pass + + +@dataclass +class PackageNotFoundError(PackageManagerError): + """ Repository not found in repository database exception """ + + name: str + + def __str__(self): + return f'Package {self.name} is not found in packages database' + + +@dataclass +class PackageAlreadyExistsError(PackageManagerError): + """ Package already exists in the packages database exception. """ + + name: str + + def __str__(self): + return f'Package {self.name} already exists in packages database' + + +class PackageInstallationError(PackageManagerError): + """ Exception for package installation error. """ + + pass + + +class PackageUninstallationError(PackageManagerError): + """ Exception for package installation error. """ + + pass + + +class PackageUpgradeError(PackageManagerError): + """ Exception for package upgrade error. """ + + pass + + +@dataclass +class PackageSonicRequirementError(PackageInstallationError): + """ Exception for installation errors, when SONiC version requirement is not met. """ + + name: str + component: str + constraint: PackageConstraint + installed_ver: Optional[Version] = None + + def __str__(self): + if self.installed_ver is not None: + return (f'Package {self.name} requires base OS component {self.component} version {self.constraint} ' + f'while the installed version is {self.installed_ver}') + return (f'Package {self.name} requires base OS component {self.component} version {self.constraint} ' + f'but it is not present int base OS image') + + +@dataclass +class PackageDependencyError(PackageInstallationError): + """ Exception class for installation errors related to missing dependency. """ + + name: str + constraint: PackageConstraint + installed_ver: Optional[Version] = None + + def __str__(self): + if self.installed_ver: + return (f'Package {self.name} requires {self.constraint} ' + f'but version {self.installed_ver} is installed') + return f'Package {self.name} requires {self.constraint} but it is not installed' + + +@dataclass +class PackageComponentDependencyError(PackageInstallationError): + """ Exception class for installation error caused by component + version dependency. 
""" + + name: str + dependency: str + component: str + constraint: VersionConstraint + installed_ver: Optional[Version] = None + + def __str__(self): + if self.installed_ver: + return (f'Package {self.name} requires {self.component} {self.constraint} ' + f'in package {self.dependency} but version {self.installed_ver} is installed') + return (f'Package {self.name} requires {self.component} {self.constraint} ' + f'in package {self.dependency} but it is not installed') + + +@dataclass +class PackageConflictError(PackageInstallationError): + """ Exception class for installation errors related to missing dependency. """ + + name: str + constraint: PackageConstraint + installed_ver: Version + + def __str__(self): + return (f'Package {self.name} conflicts with {self.constraint} but ' + f'version {self.installed_ver} is installed') + + +@dataclass +class PackageComponentConflictError(PackageInstallationError): + """ Exception class for installation error caused by component + version conflict. """ + + name: str + dependency: str + component: str + constraint: VersionConstraint + installed_ver: Version + + def __str__(self): + return (f'Package {self.name} conflicts with {self.component} {self.constraint} ' + f'in package {self.dependency} but version {self.installed_ver} is installed') + diff --git a/sonic_package_manager/logger.py b/sonic_package_manager/logger.py new file mode 100644 index 0000000000..3d5e06d35f --- /dev/null +++ b/sonic_package_manager/logger.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +""" Logger for sonic-package-manager. """ + +import logging.handlers + +import click_log + + +class Formatter(click_log.ColorFormatter): + """ Click logging formatter. """ + + colors = { + 'error': dict(fg='red'), + 'exception': dict(fg='red'), + 'critical': dict(fg='red'), + 'debug': dict(fg='blue', bold=True), + 'warning': dict(fg='yellow'), + } + + +log = logging.getLogger("sonic-package-manager") +log.setLevel(logging.INFO) + +click_handler = click_log.ClickHandler() +click_handler.formatter = Formatter() + +log.addHandler(click_handler) +log.addHandler(logging.handlers.SysLogHandler()) diff --git a/sonic_package_manager/main.py b/sonic_package_manager/main.py new file mode 100644 index 0000000000..c0589ae5b5 --- /dev/null +++ b/sonic_package_manager/main.py @@ -0,0 +1,460 @@ +#!/usr/bin/env python + +import functools +import json +import os +import sys +import typing + +import click +import click_log +import tabulate +from natsort import natsorted + +from sonic_package_manager.database import PackageEntry, PackageDatabase +from sonic_package_manager.errors import PackageManagerError +from sonic_package_manager.logger import log +from sonic_package_manager.manager import PackageManager + +BULLET_UC = '\u2022' + + +def exit_cli(*args, **kwargs): + """ Print a message and exit with rc 1. """ + + click.secho(*args, **kwargs) + sys.exit(1) + + +def show_help(ctx): + """ Show help message and exit process successfully. """ + + click.echo(ctx.get_help()) + ctx.exit(0) + + +def root_privileges_required(func: typing.Callable) -> typing.Callable: + """ Decorates a function, so that the function is invoked + only if the user is root. """ + + @functools.wraps(func) + def wrapped_function(*args, **kwargs): + """ Wrapper around func. """ + + if os.geteuid() != 0: + exit_cli('Root privileges required for this operation', fg='red') + + return func(*args, **kwargs) + + wrapped_function.__doc__ += '\n\n NOTE: This command requires elevated (root) privileges to run.' 
+
+    return wrapped_function
+
+
+def add_options(options):
+    """ Decorator to append options from
+    input list to command. """
+
+    def _add_options(func):
+        for option in reversed(options):
+            func = option(func)
+        return func
+
+    return _add_options
+
+
+class MutuallyExclusiveOption(click.Option):
+    """ Option type extended with a 'mutually_exclusive' parameter, which
+    makes the CLI verify that mutually exclusive options are not used
+    together in a single command. """
+
+    def __init__(self, *args, **kwargs):
+        self.mutually_exclusive = set(kwargs.pop('mutually_exclusive', []))
+        help_string = kwargs.get('help', '')
+        if self.mutually_exclusive:
+            ex_str = ', '.join(self.mutually_exclusive)
+            kwargs['help'] = f'{help_string} ' \
+                             f'NOTE: This argument is mutually ' \
+                             f'exclusive with arguments: [{ex_str}].'
+        super().__init__(*args, **kwargs)
+
+    def handle_parse_result(self, ctx, opts, args):
+        if self.name in opts and opts[self.name] is not None:
+            for opt_name in self.mutually_exclusive.intersection(opts):
+                if opts[opt_name] is None:
+                    continue
+
+                raise click.UsageError(f'Illegal usage: {self.name} is mutually '
+                                       f'exclusive with arguments '
+                                       f'{", ".join(self.mutually_exclusive)}.')
+
+        return super().handle_parse_result(ctx, opts, args)
+
+
+PACKAGE_SOURCE_OPTIONS = [
+    click.option('--from-repository',
+                 help='Fetch package directly from image registry repository.',
+                 cls=MutuallyExclusiveOption,
+                 mutually_exclusive=['from_tarball', 'package_expr']),
+    click.option('--from-tarball',
+                 type=click.Path(exists=True,
+                                 readable=True,
+                                 file_okay=True,
+                                 dir_okay=False),
+                 help='Fetch package from saved image tarball.',
+                 cls=MutuallyExclusiveOption,
+                 mutually_exclusive=['from_repository', 'package_expr']),
+    click.argument('package-expr',
+                   type=str,
+                   required=False)
+]
+
+
+PACKAGE_COMMON_INSTALL_OPTIONS = [
+    click.option('--skip-host-plugins',
+                 is_flag=True,
+                 help='Do not install host OS plugins provided by the package (CLI, etc). '
+                      'NOTE: If the package manifest marks host OS plugins as mandatory, '
+                      'this option will fail the installation.')
]


PACKAGE_COMMON_OPERATION_OPTIONS = [
+    click.option('-f', '--force',
+                 is_flag=True,
+                 help='Force operation by ignoring package dependency tree and package manifest validation failures.'),
+    click.option('-y', '--yes',
+                 is_flag=True,
+                 help='Automatically answer yes on prompts.'),
+    click_log.simple_verbosity_option(log, help='Either CRITICAL, ERROR, WARNING, INFO or DEBUG. Default is INFO.'),
+]
+
+
+def get_package_status(package: PackageEntry):
+    """ Returns the installation status message for package. """
+
+    if package.built_in:
+        return 'Built-In'
+    elif package.installed:
+        return 'Installed'
+    else:
+        return 'Not Installed'
+
+
+@click.group()
+@click.pass_context
+def cli(ctx):
+    """ SONiC Package Manager """
+
+    ctx.obj = PackageManager.get_manager()
+
+
+@cli.group()
+@click.pass_context
+def repository(ctx):
+    """ Repository management commands. """
+
+    pass
+
+
+@cli.group()
+@click.pass_context
+def show(ctx):
+    """ Package manager show commands. """
+
+    pass
+
+
+@show.group()
+@click.pass_context
+def package(ctx):
+    """ Package show commands. """
+
+    pass
+
+
+@cli.command()
+@click.pass_context
+def list(ctx):
+    """ List available packages.
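+
+    Example (illustrative; the package shown is hypothetical):
+
+        admin@sonic:~$ sonic-package-manager list
+        Name        Repository                   Description         Version    Status
+        ----------  ---------------------------  ------------------  ---------  ---------
+        cpu-report  azurecr.io/sonic-cpu-report  CPU report feature  1.0.0      Installed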
""" + + table_header = ['Name', 'Repository', 'Description', 'Version', 'Status'] + table_body = [] + + manager: PackageManager = ctx.obj + + try: + for package in natsorted(manager.database): + repository = package.repository or 'N/A' + version = package.version or 'N/A' + description = package.description or 'N/A' + status = get_package_status(package) + + table_body.append([ + package.name, + repository, + description, + version, + status + ]) + + click.echo(tabulate.tabulate(table_body, table_header)) + except PackageManagerError as err: + exit_cli(f'Failed to list repositories: {err}', fg='red') + + +@package.command() +@add_options(PACKAGE_SOURCE_OPTIONS) +@click.pass_context +def manifest(ctx, + package_expr, + from_repository, + from_tarball): + """ Show package manifest. """ + + manager: PackageManager = ctx.obj + + try: + source = manager.get_package_source(package_expr, + from_repository, + from_tarball) + package = source.get_package() + click.echo(json.dumps(package.manifest.unmarshal(), indent=4)) + except Exception as err: + exit_cli(f'Failed to print manifest: {err}', fg='red') + + +@package.command() +@click.argument('name') +@click.option('--all', is_flag=True, help='Show all available tags in repository.') +@click.option('--plain', is_flag=True, help='Plain output.') +@click.pass_context +def versions(ctx, name, all, plain): + """ Show available versions. """ + + try: + manager: PackageManager = ctx.obj + versions = manager.get_package_available_versions(name, all) + for version in versions: + if not plain: + click.secho(f'{BULLET_UC} ', bold=True, fg='green', nl=False) + click.secho(f'{version}') + except Exception as err: + exit_cli(f'Failed to get package versions for {name}: {err}', fg='red') + + +@package.command() +@add_options(PACKAGE_SOURCE_OPTIONS) +@click.pass_context +def changelog(ctx, + package_expr, + from_repository, + from_tarball): + """ Show package changelog. """ + + manager: PackageManager = ctx.obj + + try: + source = manager.get_package_source(package_expr, + from_repository, + from_tarball) + package = source.get_package() + changelog = package.manifest['package']['changelog'] + + if not changelog: + raise PackageManagerError(f'No changelog for package {package.name}') + + for version, entry in changelog.items(): + author = entry.get('author') or 'N/A' + email = entry.get('email') or 'N/A' + changes = entry.get('changes') or [] + date = entry.get('date') or 'N/A' + click.secho(f'{version}:\n', fg='green', bold=True) + for line in changes: + click.secho(f' {BULLET_UC} {line}', bold=True) + click.secho(f'\n {author} ' + f'({email}) {date}', fg='green', bold=True) + click.secho('') + + except Exception as err: + exit_cli(f'Failed to print package changelog: {err}', fg='red') + + +@repository.command() +@click.argument('name', type=str) +@click.argument('repository', type=str) +@click.option('--default-reference', type=str, help='Default installation reference. Can be a tag or sha256 digest in repository.') +@click.option('--description', type=str, help='Optional package entry description.') +@click.pass_context +@root_privileges_required +def add(ctx, name, repository, default_reference, description): + """ Add a new repository to database. 
""" + + manager: PackageManager = ctx.obj + + try: + manager.add_repository(name, + repository, + description=description, + default_reference=default_reference) + except Exception as err: + exit_cli(f'Failed to add repository {name}: {err}', fg='red') + + +@repository.command() +@click.argument("name") +@click.pass_context +@root_privileges_required +def remove(ctx, name): + """ Remove repository from database. """ + + manager: PackageManager = ctx.obj + + try: + manager.remove_repository(name) + except Exception as err: + exit_cli(f'Failed to remove repository {name}: {err}', fg='red') + + +@cli.command() +@click.option('--enable', + is_flag=True, + default=None, + help='Set the default state of the feature to enabled ' + 'and enable feature right after installation. ' + 'NOTE: user needs to execute "config save -y" to make ' + 'this setting persistent.') +@click.option('--set-owner', + type=click.Choice(['local', 'kube']), + default=None, + help='Default owner configuration setting for a feature.') +@click.option('--allow-downgrade', + is_flag=True, + default=None, + help='Allow package downgrade. By default an attempt to downgrade the package ' + 'will result in a failure since downgrade might not be supported by the package, ' + 'thus requires explicit request from the user.') +@add_options(PACKAGE_SOURCE_OPTIONS) +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@add_options(PACKAGE_COMMON_INSTALL_OPTIONS) +@click.pass_context +@root_privileges_required +def install(ctx, + package_expr, + from_repository, + from_tarball, + force, + yes, + enable, + set_owner, + skip_host_plugins, + allow_downgrade): + """ Install/Upgrade package using [PACKAGE_EXPR] in format "[=|@]". + + The repository to pull the package from is resolved by lookup in package database, + thus the package has to be added via "sonic-package-manager repository add" command. + + In case when [PACKAGE_EXPR] is a package name "" this command will install or upgrade + to a version referenced by "default-reference" in package database. """ + + manager: PackageManager = ctx.obj + + package_source = package_expr or from_repository or from_tarball + if not package_source: + exit_cli(f'Package source is not specified', fg='red') + + if not yes and not force: + click.confirm(f'{package_source} is going to be installed, ' + f'continue?', abort=True, show_default=True) + + install_opts = { + 'force': force, + 'skip_host_plugins': skip_host_plugins, + } + if enable is not None: + install_opts['enable'] = enable + if set_owner is not None: + install_opts['default_owner'] = set_owner + if allow_downgrade is not None: + install_opts['allow_downgrade'] = allow_downgrade + + try: + manager.install(package_expr, + from_repository, + from_tarball, + **install_opts) + except Exception as err: + exit_cli(f'Failed to install {package_source}: {err}', fg='red') + except KeyboardInterrupt: + exit_cli(f'Operation canceled by user', fg='red') + + +@cli.command() +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@add_options(PACKAGE_COMMON_INSTALL_OPTIONS) +@click.argument('name') +@click.pass_context +@root_privileges_required +def reset(ctx, name, force, yes, skip_host_plugins): + """ Reset package to the default version. 
""" + + manager: PackageManager = ctx.obj + + if not yes and not force: + click.confirm(f'Package {name} is going to be reset to default version, ' + f'continue?', abort=True, show_default=True) + + try: + manager.reset(name, force, skip_host_plugins) + except Exception as err: + exit_cli(f'Failed to reset package {name}: {err}', fg='red') + except KeyboardInterrupt: + exit_cli(f'Operation canceled by user', fg='red') + + +@cli.command() +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@click.argument('name') +@click.pass_context +@root_privileges_required +def uninstall(ctx, name, force, yes): + """ Uninstall package. """ + + manager: PackageManager = ctx.obj + + if not yes and not force: + click.confirm(f'Package {name} is going to be uninstalled, ' + f'continue?', abort=True, show_default=True) + + try: + manager.uninstall(name, force) + except Exception as err: + exit_cli(f'Failed to uninstall package {name}: {err}', fg='red') + except KeyboardInterrupt: + exit_cli(f'Operation canceled by user', fg='red') + + +@cli.command() +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@click.option('--dockerd-socket', type=click.Path()) +@click.argument('database', type=click.Path()) +@click.pass_context +@root_privileges_required +def migrate(ctx, database, force, yes, dockerd_socket): + """ Migrate packages from the given database file. """ + + manager: PackageManager = ctx.obj + + if not yes and not force: + click.confirm('Continue with package migration?', abort=True, show_default=True) + + try: + manager.migrate_packages(PackageDatabase.from_file(database), dockerd_socket) + except Exception as err: + exit_cli(f'Failed to migrate packages {err}', fg='red') + except KeyboardInterrupt: + exit_cli(f'Operation canceled by user', fg='red') + + +if __name__ == "__main__": + cli() diff --git a/sonic_package_manager/manager.py b/sonic_package_manager/manager.py new file mode 100644 index 0000000000..ba437534ed --- /dev/null +++ b/sonic_package_manager/manager.py @@ -0,0 +1,931 @@ +#!/usr/bin/env python + +import contextlib +import functools +import os +import pkgutil +import tempfile +from inspect import signature +from typing import Any, Iterable, Callable, Dict, Optional + +import docker +import filelock +from sonic_py_common import device_info + +from sonic_package_manager import utils +from sonic_package_manager.constraint import ( + VersionConstraint, + PackageConstraint +) +from sonic_package_manager.database import ( + PACKAGE_MANAGER_LOCK_FILE, + PackageDatabase +) +from sonic_package_manager.dockerapi import DockerApi +from sonic_package_manager.errors import ( + PackageManagerError, + PackageDependencyError, + PackageComponentDependencyError, + PackageConflictError, + PackageComponentConflictError, + PackageInstallationError, + PackageSonicRequirementError, + PackageUninstallationError, + PackageUpgradeError +) +from sonic_package_manager.logger import log +from sonic_package_manager.metadata import MetadataResolver +from sonic_package_manager.package import Package +from sonic_package_manager.progress import ProgressManager +from sonic_package_manager.reference import PackageReference +from sonic_package_manager.registry import RegistryResolver +from sonic_package_manager.service_creator.creator import ( + ServiceCreator, + run_command +) +from sonic_package_manager.service_creator.feature import FeatureRegistry +from sonic_package_manager.service_creator.sonic_db import SonicDB +from sonic_package_manager.service_creator.utils import in_chroot +from sonic_package_manager.source import ( + 
+    PackageSource,
+    LocalSource,
+    RegistrySource,
+    TarballSource
+)
+from sonic_package_manager.utils import DockerReference
+from sonic_package_manager.version import (
+    Version,
+    VersionRange,
+    version_to_tag,
+    tag_to_version
+)
+
+
+@contextlib.contextmanager
+def failure_ignore(ignore: bool):
+    """ Ignores failures based on parameter passed. """
+
+    try:
+        yield
+    except Exception as err:
+        if ignore:
+            log.warning(f'ignoring error {err}')
+        else:
+            raise
+
+
+def under_lock(func: Callable) -> Callable:
+    """ Execute operations under lock. """
+
+    @functools.wraps(func)
+    def wrapped_function(*args, **kwargs):
+        self = args[0]
+        with self.lock:
+            return func(*args, **kwargs)
+
+    return wrapped_function
+
+
+def opt_check(func: Callable) -> Callable:
+    """ Check kwargs for function. """
+
+    @functools.wraps(func)
+    def wrapped_function(*args, **kwargs):
+        sig = signature(func)
+        unsupported_opts = [opt for opt in kwargs if opt not in sig.parameters]
+        if unsupported_opts:
+            raise PackageManagerError(
+                f'Unsupported options {unsupported_opts} for {func.__name__}'
+            )
+        return func(*args, **kwargs)
+
+    return wrapped_function
+
+
+def rollback(func, *args, **kwargs):
+    """ Used in rollback callbacks to ignore failure
+    but proceed with rollback. Error will be printed
+    but not fail the whole procedure of rollback. """
+
+    @functools.wraps(func)
+    def wrapper():
+        try:
+            func(*args, **kwargs)
+        except Exception as err:
+            log.error(f'failed in rollback: {err}')
+
+    return wrapper
+
+
+def package_constraint_to_reference(constraint: PackageConstraint) -> PackageReference:
+    package_name, version_constraint = constraint.name, constraint.constraint
+    # Allow only specific version for now.
+    # Later we can improve package manager to support
+    # installing packages using expressions like 'package>1.0.0'
+    if version_constraint == VersionRange():  # empty range means any version
+        return PackageReference(package_name, None)
+    if not isinstance(version_constraint, Version):
+        raise PackageManagerError(f'Can only install a specific version. '
+                                  f'Use the expression "{package_name}=<version>" '
+                                  f'to install a specific version')
+    return PackageReference(package_name, version_to_tag(version_constraint))
+
+
+def parse_reference_expression(expression):
+    try:
+        return package_constraint_to_reference(PackageConstraint.parse(expression))
+    except ValueError:
+        # if we failed to parse the expression as constraint expression
+        # we will try to parse it as reference
+        return PackageReference.parse(expression)
+
+
+def validate_package_base_os_constraints(package: Package, sonic_version_info: Dict[str, str]):
+    """ Verify that all dependencies on base OS components are met.
+    Args:
+        package: Package to check constraints for.
+        sonic_version_info: SONiC components version information.
+    Raises:
+        PackageSonicRequirementError: in case dependency is not satisfied.
+    """
+
+    base_os_constraints = package.manifest['package']['base-os'].components
+    for component, constraint in base_os_constraints.items():
+        if component not in sonic_version_info:
+            raise PackageSonicRequirementError(package.name, component, constraint)
+
+        version = Version.parse(sonic_version_info[component])
+
+        if not constraint.allows_all(version):
+            raise PackageSonicRequirementError(package.name, component, constraint, version)
+
+
+def validate_package_tree(packages: Dict[str, Package]):
+    """ Verify that all dependencies are met in all packages passed to this function.
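+
+    For example (illustrative): if a package "featureA" declares a dependency
+    "featureB>=1.0.0" and "featureB" is missing from packages or an older
+    version of it is installed, PackageDependencyError is raised.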
+
+    Args:
+        packages: list of packages to check
+    Raises:
+        PackageDependencyError: if dependency is missing
+        PackageConflictError: if there is a conflict between packages
+    """
+
+    for name, package in packages.items():
+        log.debug(f'checking dependencies for {name}')
+        for dependency in package.manifest['package']['depends']:
+            dependency_package = packages.get(dependency.name)
+            if dependency_package is None:
+                raise PackageDependencyError(package.name, dependency)
+
+            installed_version = dependency_package.version
+            log.debug(f'dependency package is installed {dependency.name}: {installed_version}')
+            if not dependency.constraint.allows_all(installed_version):
+                raise PackageDependencyError(package.name, dependency, installed_version)
+
+            dependency_components = dependency.components
+            if not dependency_components:
+                dependency_components = {}
+                for component, version in package.components.items():
+                    implicit_constraint = VersionConstraint.parse(f'^{version.major}.{version.minor}.0')
+                    dependency_components[component] = implicit_constraint
+
+            for component, constraint in dependency_components.items():
+                if component not in dependency_package.components:
+                    raise PackageComponentDependencyError(package.name, dependency,
+                                                          component, constraint)
+
+                component_version = dependency_package.components[component]
+                log.debug(f'dependency package {dependency.name}: '
+                          f'component {component} version is {component_version}')
+
+                if not constraint.allows_all(component_version):
+                    raise PackageComponentDependencyError(package.name, dependency, component,
+                                                          constraint, component_version)
+
+        log.debug(f'checking conflicts for {name}')
+        for conflict in package.manifest['package']['breaks']:
+            conflicting_package = packages.get(conflict.name)
+            if conflicting_package is None:
+                continue
+
+            installed_version = conflicting_package.version
+            log.debug(f'conflicting package is installed {conflict.name}: {installed_version}')
+            if conflict.constraint.allows_all(installed_version):
+                raise PackageConflictError(package.name, conflict, installed_version)
+
+            for component, constraint in conflict.components.items():
+                if component not in conflicting_package.components:
+                    continue
+
+                component_version = conflicting_package.components[component]
+                log.debug(f'conflicting package {conflict.name}: '
+                          f'component {component} version is {component_version}')
+
+                if constraint.allows_all(component_version):
+                    raise PackageComponentConflictError(package.name, conflict, component,
+                                                        constraint, component_version)
+
+
+def validate_package_cli_can_be_skipped(package: Package, skip: bool):
+    """ Checks whether package CLI installation can be skipped.
+
+    Args:
+        package: Package to validate
+        skip: Whether to skip installing CLI
+
+    Raises:
+        PackageManagerError
+
+    """
+
+    if package.manifest['cli']['mandatory'] and skip:
+        raise PackageManagerError(f'CLI is mandatory for package {package.name} '
+                                  f'but it was requested to be not installed')
+    elif skip:
+        log.warning(f'Package {package.name} CLI plugin will not be installed')
+
+
+class PackageManager:
+    """ SONiC Package Manager. This class provides public API
+    for sonic_package_manager python library. It has functionality
+    for installing, uninstalling, updating SONiC packages as well as
+    retrieving information about the packages from different sources.
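+
+    Example (illustrative; "cpu-report" is a hypothetical package):
+
+        manager = PackageManager.get_manager()
+        manager.install('cpu-report=1.0.0')
+        manager.uninstall('cpu-report')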
""" + + def __init__(self, + docker_api: DockerApi, + registry_resolver: RegistryResolver, + database: PackageDatabase, + metadata_resolver: MetadataResolver, + service_creator: ServiceCreator, + device_information: Any, + lock: filelock.FileLock): + """ Initialize PackageManager. """ + + self.lock = lock + self.docker = docker_api + self.registry_resolver = registry_resolver + self.database = database + self.metadata_resolver = metadata_resolver + self.service_creator = service_creator + self.feature_registry = service_creator.feature_registry + self.is_multi_npu = device_information.is_multi_npu() + self.num_npus = device_information.get_num_npus() + self.version_info = device_information.get_sonic_version_info() + + @under_lock + def add_repository(self, *args, **kwargs): + """ Add repository to package database + and commit database content. + + Args: + args: Arguments to pass to PackageDatabase.add_package + kwargs: Keyword arguments to pass to PackageDatabase.add_package + """ + + self.database.add_package(*args, **kwargs) + self.database.commit() + + @under_lock + def remove_repository(self, name: str): + """ Remove repository from package database + and commit database content. + + Args: + name: package name + """ + + self.database.remove_package(name) + self.database.commit() + + @under_lock + def install(self, + expression: Optional[str] = None, + repotag: Optional[str] = None, + tarball: Optional[str] = None, + **kwargs): + """ Install/Upgrade SONiC Package from either an expression + representing the package and its version, repository and tag or + digest in same format as "docker pulL" accepts or an image tarball path. + + Args: + expression: SONiC Package reference expression + repotag: Install/Upgrade from REPO[:TAG][@DIGEST] + tarball: Install/Upgrade from tarball, path to tarball file + kwargs: Install/Upgrade options for self.install_from_source + Raises: + PackageManagerError + """ + + source = self.get_package_source(expression, repotag, tarball) + package = source.get_package() + + if self.is_installed(package.name): + self.upgrade_from_source(source, **kwargs) + else: + self.install_from_source(source, **kwargs) + + @under_lock + @opt_check + def install_from_source(self, + source: PackageSource, + force=False, + enable=False, + default_owner='local', + skip_host_plugins=False): + """ Install SONiC Package from source represented by PackageSource. + This method contains the logic of package installation. + + Args: + source: SONiC Package source. + force: Force the installation. + enable: If True the installed feature package will be enabled. + default_owner: Owner of the installed package. + skip_host_plugins: Skip CLI plugin installation. + Raises: + PackageManagerError + """ + + package = source.get_package() + name = package.name + + with failure_ignore(force): + if self.is_installed(name): + raise PackageInstallationError(f'{name} is already installed') + + version = package.manifest['package']['version'] + feature_state = 'enabled' if enable else 'disabled' + installed_packages = self._get_installed_packages_and(package) + + with failure_ignore(force): + validate_package_base_os_constraints(package, self.version_info) + validate_package_tree(installed_packages) + validate_package_cli_can_be_skipped(package, skip_host_plugins) + + # After all checks are passed we proceed to actual installation + + # When installing package from a tarball or directly from registry + # package name may not be in database. 
+        if not self.database.has_package(package.name):
+            self.database.add_package(package.name, package.repository)
+
+        try:
+            with contextlib.ExitStack() as exits:
+                source.install(package)
+                exits.callback(rollback(source.uninstall, package))
+
+                self.service_creator.create(package, state=feature_state, owner=default_owner)
+                exits.callback(rollback(self.service_creator.remove, package))
+
+                if not skip_host_plugins:
+                    self._install_cli_plugins(package)
+                    exits.callback(rollback(self._uninstall_cli_plugins, package))
+
+                exits.pop_all()
+        except Exception as err:
+            raise PackageInstallationError(f'Failed to install {package.name}: {err}')
+        except KeyboardInterrupt:
+            raise
+
+        package.entry.installed = True
+        package.entry.version = version
+        self.database.update_package(package.entry)
+        self.database.commit()
+
+    @under_lock
+    @opt_check
+    def uninstall(self, name: str, force=False):
+        """ Uninstall SONiC Package referenced by name. The uninstallation
+        can be forced if force argument is True.
+
+        Args:
+            name: SONiC Package name.
+            force: Force the uninstallation.
+        Raises:
+            PackageManagerError
+        """
+
+        with failure_ignore(force):
+            if not self.is_installed(name):
+                raise PackageUninstallationError(f'{name} is not installed')
+
+        package = self.get_installed_package(name)
+        service_name = package.manifest['service']['name']
+
+        with failure_ignore(force):
+            if self.feature_registry.is_feature_enabled(service_name):
+                raise PackageUninstallationError(
+                    f'{service_name} is enabled. Disable the feature first')
+
+        if package.built_in:
+            raise PackageUninstallationError(
+                f'Cannot uninstall built-in package {package.name}')
+
+        installed_packages = self._get_installed_packages_except(package)
+
+        with failure_ignore(force):
+            validate_package_tree(installed_packages)
+
+        # After all checks are passed we proceed to actual uninstallation
+
+        try:
+            self._uninstall_cli_plugins(package)
+            self.service_creator.remove(package)
+
+            # Clean containers based on this image
+            containers = self.docker.ps(filters={'ancestor': package.image_id},
+                                        all=True)
+            for container in containers:
+                self.docker.rm(container.id, force=True)
+
+            self.docker.rmi(package.image_id, force=True)
+            package.entry.image_id = None
+        except Exception as err:
+            raise PackageUninstallationError(
+                f'Failed to uninstall {package.name}: {err}'
+            )
+
+        package.entry.installed = False
+        package.entry.version = None
+        self.database.update_package(package.entry)
+        self.database.commit()
+
+    @under_lock
+    @opt_check
+    def upgrade_from_source(self,
+                            source: PackageSource,
+                            force=False,
+                            skip_host_plugins=False,
+                            allow_downgrade=False):
+        """ Upgrade SONiC Package to a version the package reference
+        expression specifies. Can force the upgrade if force parameter
+        is True. Force can allow a package downgrade.
+
+        Args:
+            source: SONiC Package source
+            force: Force the upgrade.
+            skip_host_plugins: Skip host OS plugins installation.
+            allow_downgrade: Flag to allow package downgrade.
+        Raises:
+            PackageManagerError
+        """
+
+        new_package = source.get_package()
+        name = new_package.name
+
+        with failure_ignore(force):
+            if not self.is_installed(name):
+                raise PackageUpgradeError(f'{name} is not installed')
+
+        old_package = self.get_installed_package(name)
+
+        if old_package.built_in:
+            raise PackageUpgradeError(
+                f'Cannot upgrade built-in package {old_package.name}'
+            )
+
+        old_feature = old_package.manifest['service']['name']
+        new_feature = new_package.manifest['service']['name']
+        old_version = old_package.manifest['package']['version']
+        new_version = new_package.manifest['package']['version']
+
+        with failure_ignore(force):
+            if old_version == new_version:
+                raise PackageUpgradeError(f'{new_version} is already installed')
+
+            # TODO: Not all packages might support downgrade.
+            # We put a check here but we understand that for some packages
+            # the downgrade might be safe to do. There can be a variable in manifest
+            # describing package downgrade ability or downgrade-able versions.
+            if new_version < old_version and not allow_downgrade:
+                raise PackageUpgradeError(
+                    f'Request to downgrade from {old_version} to {new_version}. '
+                    f'Downgrade might not be supported by the package'
+                )
+
+        # remove currently installed package from the list
+        installed_packages = self._get_installed_packages_and(new_package)
+
+        with failure_ignore(force):
+            validate_package_base_os_constraints(new_package, self.version_info)
+            validate_package_tree(installed_packages)
+            validate_package_cli_can_be_skipped(new_package, skip_host_plugins)
+
+        # After all checks are passed we proceed to actual upgrade
+
+        try:
+            with contextlib.ExitStack() as exits:
+                self._uninstall_cli_plugins(old_package)
+                exits.callback(rollback(self._install_cli_plugins, old_package))
+
+                source.install(new_package)
+                exits.callback(rollback(source.uninstall, new_package))
+
+                if self.feature_registry.is_feature_enabled(old_feature):
+                    self._systemctl_action(old_package, 'stop')
+                    exits.callback(rollback(self._systemctl_action,
+                                            old_package, 'start'))
+
+                self.service_creator.remove(old_package, deregister_feature=False)
+                exits.callback(rollback(self.service_creator.create,
+                                        old_package, register_feature=False))
+
+                # Clean containers based on the old image
+                containers = self.docker.ps(filters={'ancestor': old_package.image_id},
+                                            all=True)
+                for container in containers:
+                    self.docker.rm(container.id, force=True)
+
+                self.service_creator.create(new_package, register_feature=False)
+                exits.callback(rollback(self.service_creator.remove, new_package,
+                                        register_feature=False))
+
+                if self.feature_registry.is_feature_enabled(new_feature):
+                    self._systemctl_action(new_package, 'start')
+                    exits.callback(rollback(self._systemctl_action,
+                                            new_package, 'stop'))
+
+                if not skip_host_plugins:
+                    self._install_cli_plugins(new_package)
+                    exits.callback(rollback(self._uninstall_cli_plugins, new_package))
+
+                self.docker.rmi(old_package.image_id, force=True)
+
+                exits.pop_all()
+        except Exception as err:
+            raise PackageUpgradeError(f'Failed to upgrade {new_package.name}: {err}')
+        except KeyboardInterrupt:
+            raise
+
+        new_package_entry = new_package.entry
+        new_package_entry.installed = True
+        new_package_entry.version = new_version
+        self.database.update_package(new_package_entry)
+        self.database.commit()
+
+    @under_lock
+    @opt_check
+    def reset(self, name: str, force: bool = False, skip_host_plugins: bool = False):
+        """ Reset package to the default version.
+
+        Args:
+            name: SONiC Package name.
+            force: Force the reset.
+            skip_host_plugins: Skip host plugins installation.
+        Raises:
+            PackageManagerError
+        """
+
+        with failure_ignore(force):
+            if not self.is_installed(name):
+                raise PackageManagerError(f'{name} is not installed')
+
+        package = self.get_installed_package(name)
+        default_reference = package.entry.default_reference
+        if default_reference is None:
+            raise PackageManagerError(f'package {name} has no default reference')
+
+        package_ref = PackageReference(name, default_reference)
+        source = self.get_package_source(package_ref=package_ref)
+        self.upgrade_from_source(source, force=force,
+                                 allow_downgrade=True,
+                                 skip_host_plugins=skip_host_plugins)
+
+    @under_lock
+    def migrate_packages(self,
+                         old_package_database: PackageDatabase,
+                         dockerd_sock: Optional[str] = None):
+        """
+        Migrate packages from an old database. This function compares the
+        passed database with the current one and, for every package:
+        if the package is missing in the current database, it is added;
+        if the package is installed in the passed database but not installed
+        in the current one, it is installed with the version from the passed
+        database; if the package is installed in both but the passed database
+        has a newer version, the package is upgraded to the newer version;
+        if the version installed in the current database is already newer,
+        no action is taken. If the dockerd_sock parameter is passed, the
+        migration process will use images loaded from the docker library of
+        the currently installed image.
+
+        Args:
+            old_package_database: SONiC Package Database to migrate packages from.
+            dockerd_sock: Path to dockerd socket.
+        Raises:
+            PackageManagerError
+        """
+
+        self._migrate_package_database(old_package_database)
+
+        def migrate_package(old_package_entry,
+                            new_package_entry):
+            """ Migrate package routine
+
+            Args:
+                old_package_entry: Entry in old package database.
+                new_package_entry: Entry in new package database.
+            """
+
+            name = new_package_entry.name
+            version = new_package_entry.version
+
+            if dockerd_sock:
+                # dockerd_sock is defined, so use dockerd_sock to connect to
+                # dockerd and fetch package image from it.
+                log.info(f'installing {name} from old docker library')
+                docker_api = DockerApi(docker.DockerClient(base_url=f'unix://{dockerd_sock}'))
+
+                image = docker_api.get_image(old_package_entry.image_id)
+
+                with tempfile.NamedTemporaryFile('wb') as file:
+                    for chunk in image.save(named=True):
+                        file.write(chunk)
+
+                    self.install(tarball=file.name)
+            else:
+                log.info(f'installing {name} version {version}')
+
+                self.install(f'{name}={version}')
+
+        # TODO: Topological sort packages by their dependencies first.
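+        # Decision summary for each old package (see docstring above):
+        #   installed in both databases -> upgrade if the old version is newer;
+        #   not installed here but a default reference exists -> install the
+        #   old version if it is newer than the default, else the default;
+        #   no default reference -> install the old version as-is.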
+        for old_package in old_package_database:
+            if not old_package.installed or old_package.built_in:
+                continue
+
+            log.info(f'migrating package {old_package.name}')
+
+            new_package = self.database.get_package(old_package.name)
+
+            if new_package.installed:
+                if old_package.version > new_package.version:
+                    log.info(f'{old_package.name} package version is greater '
+                             f'than the one installed in the new image: '
+                             f'{old_package.version} > {new_package.version}')
+                    log.info(f'upgrading {new_package.name} to {old_package.version}')
+                    new_package.version = old_package.version
+                    migrate_package(old_package, new_package)
+                else:
+                    log.info(f'skipping {new_package.name} as installed version is newer')
+            elif new_package.default_reference is not None:
+                new_package_ref = PackageReference(new_package.name, new_package.default_reference)
+                package_source = self.get_package_source(package_ref=new_package_ref)
+                package = package_source.get_package()
+                new_package_default_version = package.manifest['package']['version']
+                if old_package.version > new_package_default_version:
+                    log.info(f'{old_package.name} package version is greater '
+                             f'than the default version in the new image: '
+                             f'{old_package.version} > {new_package_default_version}')
+                    new_package.version = old_package.version
+                    migrate_package(old_package, new_package)
+                else:
+                    self.install(f'{new_package.name}={new_package_default_version}')
+            else:
+                # No default version and package is not installed.
+                # Migrate old package same version.
+                new_package.version = old_package.version
+                migrate_package(old_package, new_package)
+
+        self.database.commit()
+
+    def get_installed_package(self, name: str) -> Package:
+        """ Get installed package by name.
+
+        Args:
+            name: package name.
+        Returns:
+            Package object.
+        """
+
+        package_entry = self.database.get_package(name)
+        source = LocalSource(package_entry,
+                             self.database,
+                             self.docker,
+                             self.metadata_resolver)
+        return source.get_package()
+
+    def get_package_source(self,
+                           package_expression: Optional[str] = None,
+                           repository_reference: Optional[str] = None,
+                           tarball_path: Optional[str] = None,
+                           package_ref: Optional[PackageReference] = None):
+        """ Returns PackageSource object based on input source.
+
+        Args:
+            package_expression: SONiC Package expression string
+            repository_reference: Install from REPO[:TAG][@DIGEST]
+            tarball_path: Install from image tarball
+            package_ref: Package reference object
+        Returns:
+            SONiC Package object.
+        Raises:
+            ValueError if no source specified.
+        """
+
+        if package_expression:
+            ref = parse_reference_expression(package_expression)
+            return self.get_package_source(package_ref=ref)
+        elif repository_reference:
+            repo_ref = DockerReference.parse(repository_reference)
+            repository = repo_ref['name']
+            reference = repo_ref['tag'] or repo_ref['digest']
+            reference = reference or 'latest'
+            return RegistrySource(repository,
+                                  reference,
+                                  self.database,
+                                  self.docker,
+                                  self.metadata_resolver)
+        elif tarball_path:
+            return TarballSource(tarball_path,
+                                 self.database,
+                                 self.docker,
+                                 self.metadata_resolver)
+        elif package_ref:
+            package_entry = self.database.get_package(package_ref.name)
+
+            # Determine the reference if not specified.
+            # If package is installed assume the installed
+            # one is requested, otherwise look for default
+            # reference defined for this package. In case package
+            # does not have a default reference raise an error.
+            if package_ref.reference is None:
+                if package_entry.installed:
+                    return LocalSource(package_entry,
+                                       self.database,
+                                       self.docker,
+                                       self.metadata_resolver)
+                if package_entry.default_reference is not None:
+                    package_ref.reference = package_entry.default_reference
+                else:
+                    raise PackageManagerError('No default reference tag. '
+                                              'Please specify the version or tag explicitly')
+
+            return RegistrySource(package_entry.repository,
+                                  package_ref.reference,
+                                  self.database,
+                                  self.docker,
+                                  self.metadata_resolver)
+        else:
+            raise ValueError('No package source provided')
+
+    def get_package_available_versions(self,
+                                       name: str,
+                                       all: bool = False) -> Iterable:
+        """ Returns a list of available versions for package.
+
+        Args:
+            name: Package name.
+            all: If set to True will return all tags including
+                 those which do not follow semantic versioning.
+        Returns:
+            List of versions
+        """
+
+        package_info = self.database.get_package(name)
+        registry = self.registry_resolver.get_registry_for(package_info.repository)
+        available_tags = registry.tags(package_info.repository)
+
+        def is_semantic_ver_tag(tag: str) -> bool:
+            try:
+                tag_to_version(tag)
+                return True
+            except ValueError:
+                pass
+            return False
+
+        if all:
+            return available_tags
+
+        return map(tag_to_version, filter(is_semantic_ver_tag, available_tags))
+
+    def is_installed(self, name: str) -> bool:
+        """ Returns boolean whether a package called name is installed.
+
+        Args:
+            name: Package name.
+        Returns:
+            True if package is installed, False otherwise.
+        """
+
+        if not self.database.has_package(name):
+            return False
+        package_info = self.database.get_package(name)
+        return package_info.installed
+
+    def get_installed_packages(self) -> Dict[str, Package]:
+        """ Returns a dictionary of installed packages where
+        keys are package names and values are package objects.
+
+        Returns:
+            Installed packages dictionary.
+        """
+
+        return {
+            entry.name: self.get_installed_package(entry.name)
+            for entry in self.database if entry.installed
+        }
+
+    def _migrate_package_database(self, old_package_database: PackageDatabase):
+        """ Performs part of package migration process.
+        For every package in old_package_database that is not listed in current
+        database add a corresponding entry to current database. """
+
+        for package in old_package_database:
+            if not self.database.has_package(package.name):
+                self.database.add_package(package.name,
+                                          package.repository,
+                                          package.description,
+                                          package.default_reference)
+
+    def _get_installed_packages_and(self, package: Package) -> Dict[str, Package]:
+        """ Returns a dictionary of installed packages with their names as keys
+        adding a package provided in the argument. """
+
+        packages = self.get_installed_packages()
+        packages[package.name] = package
+        return packages
+
+    def _get_installed_packages_except(self, package: Package) -> Dict[str, Package]:
+        """ Returns a dictionary of installed packages with their names as keys
+        removing a package provided in the argument. """
+
+        packages = self.get_installed_packages()
+        packages.pop(package.name)
+        return packages
+
+    # TODO: Replace with "config feature" command.
+    # The problem with current "config feature" command
+    # is that it is asynchronous, thus can't be used
+    # for package upgrade purposes where we need to wait
+    # till service stops before upgrading docker image.
+    # It would be really handy if we could just call
+    # something like: "config feature state <name> <state> --wait"
+    # instead of operating on systemd service since
+    # this is basically a duplicated code from "hostcfgd".
+    def _systemctl_action(self, package: Package, action: str):
+        """ Execute systemctl action for a service supporting
+        multi-asic services. """
+
+        name = package.manifest['service']['name']
+        host_service = package.manifest['service']['host-service']
+        asic_service = package.manifest['service']['asic-service']
+        single_instance = host_service or (asic_service and not self.is_multi_npu)
+        multi_instance = asic_service and self.is_multi_npu
+
+        if in_chroot():
+            return
+
+        if single_instance:
+            run_command(f'systemctl {action} {name}')
+        if multi_instance:
+            for npu in range(self.num_npus):
+                run_command(f'systemctl {action} {name}@{npu}')
+
+    @staticmethod
+    def _get_cli_plugin_name(package: Package):
+        return utils.make_python_identifier(package.name) + '.py'
+
+    @classmethod
+    def _get_cli_plugin_path(cls, package: Package, command):
+        pkg_loader = pkgutil.get_loader(f'{command}.plugins')
+        if pkg_loader is None:
+            raise PackageManagerError(f'Failed to get plugins path for {command} CLI')
+        plugins_pkg_path = os.path.dirname(pkg_loader.path)
+        return os.path.join(plugins_pkg_path, cls._get_cli_plugin_name(package))
+
+    def _install_cli_plugins(self, package: Package):
+        for command in ('show', 'config', 'clear'):
+            self._install_cli_plugin(package, command)
+
+    def _uninstall_cli_plugins(self, package: Package):
+        for command in ('show', 'config', 'clear'):
+            self._uninstall_cli_plugin(package, command)
+
+    def _install_cli_plugin(self, package: Package, command: str):
+        image_plugin_path = package.manifest['cli'][command]
+        if not image_plugin_path:
+            return
+        host_plugin_path = self._get_cli_plugin_path(package, command)
+        self.docker.extract(package.entry.image_id, image_plugin_path, host_plugin_path)
+
+    def _uninstall_cli_plugin(self, package: Package, command: str):
+        image_plugin_path = package.manifest['cli'][command]
+        if not image_plugin_path:
+            return
+        host_plugin_path = self._get_cli_plugin_path(package, command)
+        if os.path.exists(host_plugin_path):
+            os.remove(host_plugin_path)
+
+    @staticmethod
+    def get_manager() -> 'PackageManager':
+        """ Creates and returns PackageManager instance.
+
+        Returns:
+            PackageManager
+        """
+
+        docker_api = DockerApi(docker.from_env())
+        registry_resolver = RegistryResolver()
+        return PackageManager(DockerApi(docker.from_env(), ProgressManager()),
+                              registry_resolver,
+                              PackageDatabase.from_file(),
+                              MetadataResolver(docker_api, registry_resolver),
+                              ServiceCreator(FeatureRegistry(SonicDB), SonicDB),
+                              device_info,
+                              filelock.FileLock(PACKAGE_MANAGER_LOCK_FILE, timeout=0))
diff --git a/sonic_package_manager/manifest.py b/sonic_package_manager/manifest.py
new file mode 100644
index 0000000000..b58a0d10f0
--- /dev/null
+++ b/sonic_package_manager/manifest.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+
+from abc import ABC
+from dataclasses import dataclass
+from typing import Optional, List, Dict, Any
+
+from sonic_package_manager.constraint import (
+    ComponentConstraints,
+    PackageConstraint
+)
+from sonic_package_manager.errors import ManifestError
+from sonic_package_manager.version import Version
+
+
+class ManifestSchema:
+    """ ManifestSchema class describes and provides marshalling
+    and unmarshalling methods.
+    """
+
+    class Marshaller:
+        """ Base class for marshaling and un-marshaling.
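+
+        Example (illustrative): DefaultMarshaller(str).marshal('swss')
+        validates that the value is a string and returns it unchanged.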
""" + + def marshal(self, value): + """ Validates and returns a valid manifest dictionary. + + Args: + value: input value to validate. + Returns: valid manifest node. + """ + + raise NotImplementedError + + def unmarshal(self, value): + """ Un-marshals the manifest to a dictionary. + + Args: + value: input value to validate. + Returns: valid manifest node. + """ + + raise NotImplementedError + + @dataclass + class ParsedMarshaller(Marshaller): + """ Marshaller used on types which support class method "parse" """ + + type: Any + + def marshal(self, value): + try: + return self.type.parse(value) + except ValueError as err: + raise ManifestError(f'Failed to marshal {value}: {err}') + + def unmarshal(self, value): + try: + if hasattr(value, 'deparse'): + return value.deparse() + return str(value) + except Exception as err: + raise ManifestError(f'Failed to unmarshal {value}: {err}') + + @dataclass + class DefaultMarshaller(Marshaller): + """ Default marshaller that validates if the given + value is instance of given type. """ + + type: type + + def marshal(self, value): + if not isinstance(value, self.type): + raise ManifestError(f'{value} is not of type {self.type.__name__}') + return value + + def unmarshal(self, value): + return value + + @dataclass + class ManifestNode(Marshaller, ABC): + """ + Base class for any manifest object. + + Attrs: + key: String representing the key for this object. + """ + + key: str + + @dataclass + class ManifestRoot(ManifestNode): + items: List + + def marshal(self, value: Optional[dict]): + result = {} + if value is None: + value = {} + + for item in self.items: + next_value = value.get(item.key) + result[item.key] = item.marshal(next_value) + return result + + def unmarshal(self, value): + return_value = {} + for item in self.items: + return_value[item.key] = item.unmarshal(value[item.key]) + return return_value + + @dataclass + class ManifestField(ManifestNode): + type: Any + default: Optional[Any] = None + + def marshal(self, value): + if value is None: + if self.default is not None: + return self.default + raise ManifestError(f'{self.key} is a required field but it is missing') + try: + return_value = self.type.marshal(value) + except Exception as err: + raise ManifestError(f'Failed to marshal {self.key}: {err}') + return return_value + + def unmarshal(self, value): + return self.type.unmarshal(value) + + @dataclass + class ManifestArray(ManifestNode): + type: Any + + def marshal(self, value): + if value is None: + return [] + + return_value = [] + try: + for item in value: + return_value.append(self.type.marshal(item)) + except Exception as err: + raise ManifestError(f'Failed to convert {self.key}={value} to array: {err}') + + return return_value + + def unmarshal(self, value): + return [self.type.unmarshal(item) for item in value] + + # TODO: add description for each field + SCHEMA = ManifestRoot('root', [ + ManifestField('version', ParsedMarshaller(Version), Version(1, 0, 0)), + ManifestRoot('package', [ + ManifestField('version', ParsedMarshaller(Version)), + ManifestField('name', DefaultMarshaller(str)), + ManifestField('description', DefaultMarshaller(str), ''), + ManifestField('base-os', ParsedMarshaller(ComponentConstraints), ComponentConstraints()), + ManifestArray('depends', ParsedMarshaller(PackageConstraint)), + ManifestArray('breaks', ParsedMarshaller(PackageConstraint)), + ManifestField('init-cfg', DefaultMarshaller(dict), dict()), + ManifestField('changelog', DefaultMarshaller(dict), dict()), + ManifestField('debug-dump', 
+                          DefaultMarshaller(str), ''),
+        ]),
+        ManifestRoot('service', [
+            ManifestField('name', DefaultMarshaller(str)),
+            ManifestArray('requires', DefaultMarshaller(str)),
+            ManifestArray('requisite', DefaultMarshaller(str)),
+            ManifestArray('wanted-by', DefaultMarshaller(str)),
+            ManifestArray('after', DefaultMarshaller(str)),
+            ManifestArray('before', DefaultMarshaller(str)),
+            ManifestArray('dependent', DefaultMarshaller(str)),
+            ManifestArray('dependent-of', DefaultMarshaller(str)),
+            ManifestField('post-start-action', DefaultMarshaller(str), ''),
+            ManifestField('pre-shutdown-action', DefaultMarshaller(str), ''),
+            ManifestField('asic-service', DefaultMarshaller(bool), False),
+            ManifestField('host-service', DefaultMarshaller(bool), True),
+            ManifestField('delayed', DefaultMarshaller(bool), False),
+        ]),
+        ManifestRoot('container', [
+            ManifestField('privileged', DefaultMarshaller(bool), False),
+            ManifestArray('volumes', DefaultMarshaller(str)),
+            ManifestArray('mounts', ManifestRoot('mounts', [
+                ManifestField('source', DefaultMarshaller(str)),
+                ManifestField('target', DefaultMarshaller(str)),
+                ManifestField('type', DefaultMarshaller(str)),
+            ])),
+            ManifestField('environment', DefaultMarshaller(dict), dict()),
+            ManifestArray('tmpfs', DefaultMarshaller(str)),
+        ]),
+        ManifestArray('processes', ManifestRoot('processes', [
+            ManifestField('name', DefaultMarshaller(str)),
+        ])),
+        ManifestRoot('cli', [
+            ManifestField('mandatory', DefaultMarshaller(bool), False),
+            ManifestField('show', DefaultMarshaller(str), ''),
+            ManifestField('config', DefaultMarshaller(str), ''),
+            ManifestField('clear', DefaultMarshaller(str), '')
+        ])
+    ])
+
+
+class Manifest(dict):
+    """ Manifest object. """
+
+    SCHEMA = ManifestSchema.SCHEMA
+
+    @classmethod
+    def marshal(cls, input_dict: dict):
+        return Manifest(cls.SCHEMA.marshal(input_dict))
+
+    def unmarshal(self) -> Dict:
+        return self.SCHEMA.unmarshal(self)
diff --git a/sonic_package_manager/metadata.py b/sonic_package_manager/metadata.py
new file mode 100644
index 0000000000..7f7c25ceaf
--- /dev/null
+++ b/sonic_package_manager/metadata.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+
+from dataclasses import dataclass, field
+
+import json
+import tarfile
+from typing import Dict
+
+from sonic_package_manager.errors import MetadataError
+from sonic_package_manager.manifest import Manifest
+from sonic_package_manager.version import Version
+
+
+def deep_update(dst: Dict, src: Dict) -> Dict:
+    """ Deep update dst dictionary with src dictionary.
+
+    Args:
+        dst: Dictionary to update
+        src: Dictionary to update with
+
+    Returns:
+        New merged dictionary.
+    """
+
+    for key, value in src.items():
+        if isinstance(value, dict):
+            node = dst.setdefault(key, {})
+            deep_update(node, value)
+        else:
+            dst[key] = value
+    return dst
+
+
+def translate_plain_to_tree(plain: Dict[str, str], sep='.') -> Dict:
+    """ Convert plain key/value dictionary into
+    a tree by splitting the key with '.'
+
+    Args:
+        plain: Dictionary to convert into tree-like structure.
+               Keys in this dictionary have to be in a format:
+               "[key0].[key1]...[keyN]", e.g: "com.azure.sonic" that
+               will be converted into tree like struct:
+
+               {
+                   "com": {
+                       "azure": {
+                           "sonic": {}
+                       }
+                   }
+               }
+        sep: Separator string
+
+    Returns:
+        Tree like structure
+
+    """
+
+    res = {}
+    for key, value in plain.items():
+        if sep not in key:
+            res[key] = value
+            continue
+        namespace, key = key.split(sep, 1)
+        res.setdefault(namespace, {})
+        deep_update(res[namespace], translate_plain_to_tree({key: value}))
+    return res
+
+
+@dataclass
+class Metadata:
+    """ Package metadata object that can be retrieved from
+    OCI image manifest. """
+
+    manifest: Manifest
+    components: Dict[str, Version] = field(default_factory=dict)
+
+
+class MetadataResolver:
+    """ Resolve metadata for package from different sources. """
+
+    def __init__(self, docker, registry_resolver):
+        self.docker = docker
+        self.registry_resolver = registry_resolver
+
+    def from_local(self, image: str) -> Metadata:
+        """ Reads manifest from locally installed docker image.
+
+        Args:
+            image: Docker image ID
+        Returns:
+            Metadata
+        Raises:
+            MetadataError
+        """
+
+        labels = self.docker.labels(image)
+        if labels is None:
+            raise MetadataError('No manifest found in image labels')
+
+        return self.from_labels(labels)
+
+    def from_registry(self,
+                      repository: str,
+                      reference: str) -> Metadata:
+        """ Reads manifest from remote registry.
+
+        Args:
+            repository: Repository to pull image from
+            reference: Reference, either tag or digest
+        Returns:
+            Metadata
+        Raises:
+            MetadataError
+        """
+
+        registry = self.registry_resolver.get_registry_for(repository)
+
+        manifest = registry.manifest(repository, reference)
+        digest = manifest['config']['digest']
+
+        blob = registry.blobs(repository, digest)
+        labels = blob['config']['Labels']
+        if labels is None:
+            raise MetadataError('No manifest found in image labels')
+
+        return self.from_labels(labels)
+
+    def from_tarball(self, image_path: str) -> Metadata:
+        """ Reads manifest from an image tarball.
+        Args:
+            image_path: Path to image tarball.
+        Returns:
+            Metadata
+        Raises:
+            MetadataError
+        """
+
+        with tarfile.open(image_path) as image:
+            manifest = json.loads(image.extractfile('manifest.json').read())
+
+            blob = manifest[0]['Config']
+            image_config = json.loads(image.extractfile(blob).read())
+            labels = image_config['config']['Labels']
+            if labels is None:
+                raise MetadataError('No manifest found in image labels')
+
+            return self.from_labels(labels)
+
+    @classmethod
+    def from_labels(cls, labels: Dict[str, str]) -> Metadata:
+        """ Get manifest from image labels.
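+
+        Example (illustrative): a label dictionary such as
+        {'com.azure.sonic.manifest': '{"package": {...}}'}
+        is first converted into a tree by translate_plain_to_tree() and the
+        manifest string is then parsed as JSON.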
+
+        Args:
+            labels: key, value string pairs
+        Returns:
+            Metadata
+        Raises:
+            MetadataError
+        """
+
+        metadata_dict = translate_plain_to_tree(labels)
+        try:
+            sonic_metadata = metadata_dict['com']['azure']['sonic']
+        except KeyError:
+            raise MetadataError('No metadata found in image labels')
+
+        try:
+            manifest_string = sonic_metadata['manifest']
+        except KeyError:
+            raise MetadataError('No manifest found in image labels')
+
+        try:
+            manifest_dict = json.loads(manifest_string)
+        except (ValueError, TypeError) as err:
+            raise MetadataError(f'Failed to parse manifest JSON: {err}')
+
+        components = {}
+        if 'versions' in sonic_metadata:
+            for component, version in sonic_metadata['versions'].items():
+                try:
+                    components[component] = Version.parse(version)
+                except ValueError as err:
+                    raise MetadataError(f'Failed to parse component version: {err}')
+
+        return Metadata(Manifest.marshal(manifest_dict), components)
diff --git a/sonic_package_manager/package.py b/sonic_package_manager/package.py
new file mode 100644
index 0000000000..2928f17392
--- /dev/null
+++ b/sonic_package_manager/package.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+from dataclasses import dataclass
+
+from sonic_package_manager.database import PackageEntry
+from sonic_package_manager.metadata import Metadata
+
+
+@dataclass
+class Package:
+    """ Package class is a representation of a SONiC package.
+
+    Attributes:
+        entry: Package entry in package database
+        metadata: Metadata object for this package
+        manifest: Manifest for this package
+        components: Components versions for this package
+        name: Name of the package from package database
+        repository: Default repository to pull this package from
+        image_id: Docker image ID of the installed package;
+                  It is set to None if package is not installed.
+        installed: Boolean flag whether package is installed or not.
+        built_in: Boolean flag whether package is built in or not.
+
+    """
+
+    entry: PackageEntry
+    metadata: Metadata
+
+    @property
+    def name(self): return self.entry.name
+
+    @property
+    def repository(self): return self.entry.repository
+
+    @property
+    def image_id(self): return self.entry.image_id
+
+    @property
+    def installed(self): return self.entry.installed
+
+    @property
+    def built_in(self): return self.entry.built_in
+
+    @property
+    def version(self): return self.entry.version
+
+    @property
+    def manifest(self): return self.metadata.manifest
+
+    @property
+    def components(self): return self.metadata.components
+
diff --git a/sonic_package_manager/progress.py b/sonic_package_manager/progress.py
new file mode 100644
index 0000000000..5258ebab98
--- /dev/null
+++ b/sonic_package_manager/progress.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+import enlighten
+
+BAR_FMT = '{desc}{desc_pad}{percentage:3.0f}%|{bar}| {count:{len_total}.2f}/{total:.2f}{unit_pad}{unit} ' + \
+          '[{elapsed}<{eta}, {rate:.2f}{unit_pad}{unit}/s]'
+
+COUNTER_FMT = '{desc}{desc_pad}{count:.1f} {unit}{unit_pad}' + \
+              '[{elapsed}, {rate:.2f}{unit_pad}{unit}/s]{fill}'
+
+
+class ProgressManager:
+    """ ProgressManager is used for creating multiple progress bars
+    which nicely interact with logging and prints. """
+
+    def __init__(self):
+        self.manager = enlighten.get_manager()
+        self.pbars = {}
+
+    def __enter__(self):
+        return self.manager.__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        return self.manager.__exit__(exc_type, exc_val, exc_tb)
+
+    def new(self, id: str, *args, **kwargs):
+        """ Creates new progress bar with id.
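+
+        Example (illustrative):
+
+            pm = ProgressManager()
+            pm.new('pull', total=100, unit='B', desc='Downloading')
+            pm.get('pull').update(10)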
+
+        Args:
+            id: progress bar identifier
+            *args: pass arguments for progress bar creation
+            **kwargs: pass keyword arguments for progress bar creation.
+        """
+
+        if 'bar_format' not in kwargs:
+            kwargs['bar_format'] = BAR_FMT
+        if 'counter_format' not in kwargs:
+            kwargs['counter_format'] = COUNTER_FMT
+
+        self.pbars[id] = self.manager.counter(*args, **kwargs)
+
+    def get(self, id: str):
+        """ Returns progress bar by id.
+        Args:
+            id: progress bar identifier
+        Returns:
+            Progress bar.
+        """
+
+        return self.pbars[id]
+
+    def __contains__(self, id):
+        return id in self.pbars
diff --git a/sonic_package_manager/reference.py b/sonic_package_manager/reference.py
new file mode 100644
index 0000000000..9c4d8e825c
--- /dev/null
+++ b/sonic_package_manager/reference.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+import re
+from dataclasses import dataclass
+from typing import Optional
+
+
+@dataclass
+class PackageReference:
+    """ PackageReference is a package version constraint. """
+
+    name: str
+    reference: Optional[str] = None
+
+    def __str__(self):
+        return f'{self.name} {self.reference}'
+
+    @staticmethod
+    def parse(expression: str) -> 'PackageReference':
+        REQUIREMENT_SPECIFIER_RE = \
+            r'(?P<name>[A-Za-z0-9_-]+)(?P<reference_expr>@(?P<reference>.*))?'
+
+        match = re.match(REQUIREMENT_SPECIFIER_RE, expression)
+        if match is None:
+            raise ValueError(f'Invalid reference specifier {expression}')
+        groupdict = match.groupdict()
+        name = groupdict.get('name')
+        reference = groupdict.get('reference')
+
+        return PackageReference(name, reference)
diff --git a/sonic_package_manager/registry.py b/sonic_package_manager/registry.py
new file mode 100644
index 0000000000..8a09d9136e
--- /dev/null
+++ b/sonic_package_manager/registry.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+import json
+from dataclasses import dataclass
+from typing import List, Dict
+
+import requests
+import www_authenticate
+from docker_image import reference
+from prettyprinter import pformat
+
+from sonic_package_manager.logger import log
+from sonic_package_manager.utils import DockerReference
+
+
+class AuthenticationServiceError(Exception):
+    """ Exception class for errors related to authentication. """
+
+    pass
+
+
+class AuthenticationService:
+    """ AuthenticationService provides authentication tokens. """
+
+    @staticmethod
+    def get_token(realm, service, scope) -> str:
+        """ Retrieve an authentication token.
+
+        Args:
+            realm: Realm: url to request token.
+            service: service to request token for.
+            scope: scope to request token for.
+        Returns:
+            token value as a string.
+        """
+
+        log.debug(f'getting authentication token: realm={realm} service={service} scope={scope}')
+
+        response = requests.get(f'{realm}?scope={scope}&service={service}')
+        if response.status_code != requests.codes.ok:
+            raise AuthenticationServiceError('Failed to retrieve token')
+
+        content = json.loads(response.content)
+        token = content['token']
+        expires_in = content['expires_in']
+
+        log.debug(f'authentication token for realm={realm} service={service} scope={scope}: '
+                  f'token={token} expires_in={expires_in}')
+
+        return token
+
+
+@dataclass
+class RegistryApiError(Exception):
+    """ Class for registry related errors. """
+
+    msg: str
+    response: requests.Response
+
+    def __str__(self):
+        code = self.response.status_code
+        content = self.response.content.decode()
+        try:
+            content = json.loads(content)
+        except ValueError:
+            pass
+        return f'{self.msg}: code: {code} details: {pformat(content)}'
+
+
+class Registry:
+    """ Provides a Docker registry interface.
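+
+    Example (illustrative):
+
+        registry = Registry('https://index.docker.io')
+        tags = registry.tags('library/alpine')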
""" + + MIME_DOCKER_MANIFEST = 'application/vnd.docker.distribution.manifest.v2+json' + + def __init__(self, host: str): + self.url = host + + @staticmethod + def _execute_get_request(url, headers): + response = requests.get(url, headers=headers) + if response.status_code == requests.codes.unauthorized: + # Get authentication details from headers + # Registry should tell how to authenticate + www_authenticate_details = response.headers['Www-Authenticate'] + log.debug(f'unauthorized: retrieving authentication details ' + f'from response headers {www_authenticate_details}') + bearer = www_authenticate.parse(www_authenticate_details)['bearer'] + token = AuthenticationService.get_token(**bearer) + headers['Authorization'] = f'Bearer {token}' + # Repeat request + response = requests.get(url, headers=headers) + return response + + def _get_base_url(self, repository: str): + return f'{self.url}/v2/{repository}' + + def tags(self, repository: str) -> List[str]: + log.debug(f'getting tags for {repository}') + + _, repository = reference.Reference.split_docker_domain(repository) + headers = {'Accept': 'application/json'} + url = f'{self._get_base_url(repository)}/tags/list' + response = self._execute_get_request(url, headers) + if response.status_code != requests.codes.ok: + raise RegistryApiError(f'Failed to retrieve tags from {repository}', response) + + content = json.loads(response.content) + log.debug(f'tags list api response: f{content}') + + return content['tags'] + + def manifest(self, repository: str, ref: str) -> Dict: + log.debug(f'getting manifest for {repository}:{ref}') + + _, repository = reference.Reference.split_docker_domain(repository) + headers = {'Accept': self.MIME_DOCKER_MANIFEST} + url = f'{self._get_base_url(repository)}/manifests/{ref}' + response = self._execute_get_request(url, headers) + + if response.status_code != requests.codes.ok: + raise RegistryApiError(f'Failed to retrieve manifest for {repository}:{ref}', response) + + content = json.loads(response.content) + log.debug(f'manifest content for {repository}:{ref}: {content}') + + return content + + def blobs(self, repository: str, digest: str): + log.debug(f'retrieving blob for {repository}:{digest}') + + _, repository = reference.Reference.split_docker_domain(repository) + headers = {'Accept': self.MIME_DOCKER_MANIFEST} + url = f'{self._get_base_url(repository)}/blobs/{digest}' + response = self._execute_get_request(url, headers) + if response.status_code != requests.codes.ok: + raise RegistryApiError(f'Failed to retrieve blobs for {repository}:{digest}', response) + content = json.loads(response.content) + + log.debug(f'retrieved blob for {repository}:{digest}: {content}') + return content + + +class RegistryResolver: + """ Returns a registry object based on the input repository reference + string. 
""" + + DockerHubRegistry = Registry('https://index.docker.io') + + def __init__(self): + pass + + def get_registry_for(self, ref: str) -> Registry: + domain, _ = DockerReference.split_docker_domain(ref) + if domain == reference.DEFAULT_DOMAIN: + return self.DockerHubRegistry + # TODO: support insecure registries + return Registry(f'https://{domain}') diff --git a/sonic_package_manager/service_creator/__init__.py b/sonic_package_manager/service_creator/__init__.py new file mode 100644 index 0000000000..e2af81ceb5 --- /dev/null +++ b/sonic_package_manager/service_creator/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +ETC_SONIC_PATH = '/etc/sonic' diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py new file mode 100644 index 0000000000..54b9315bee --- /dev/null +++ b/sonic_package_manager/service_creator/creator.py @@ -0,0 +1,342 @@ +#!/usr/bin/env python + +import contextlib +import os +import stat +import subprocess +from typing import Dict + +import jinja2 as jinja2 +from prettyprinter import pformat + +from sonic_package_manager.logger import log +from sonic_package_manager.package import Package +from sonic_package_manager.service_creator import ETC_SONIC_PATH +from sonic_package_manager.service_creator.feature import FeatureRegistry +from sonic_package_manager.service_creator.utils import in_chroot + +SERVICE_FILE_TEMPLATE = 'sonic.service.j2' +TIMER_UNIT_TEMPLATE = 'timer.unit.j2' + +SYSTEMD_LOCATION = '/usr/lib/systemd/system' + +SERVICE_MGMT_SCRIPT_TEMPLATE = 'service_mgmt.sh.j2' +SERVICE_MGMT_SCRIPT_LOCATION = '/usr/local/bin' + +DOCKER_CTL_SCRIPT_TEMPLATE = 'docker_image_ctl.j2' +DOCKER_CTL_SCRIPT_LOCATION = '/usr/bin' + +DEBUG_DUMP_SCRIPT_TEMPLATE = 'dump.sh.j2' +DEBUG_DUMP_SCRIPT_LOCATION = '/usr/local/bin/debug-dump/' + +TEMPLATES_PATH = '/usr/share/sonic/templates' + + +class ServiceCreatorError(Exception): + pass + + +def render_template(in_template: str, + outfile: str, + render_ctx: Dict, + executable: bool = False): + """ Template renderer helper routine. + Args: + in_template: Input file with template content + outfile: Output file to render template to + render_ctx: Dictionary used to generate jinja2 template + executable: Set executable bit on rendered file + """ + + log.debug(f'Rendering {in_template} to {outfile} with {pformat(render_ctx)}') + + with open(in_template, 'r') as instream: + template = jinja2.Template(instream.read()) + + with open(outfile, 'w') as outstream: + outstream.write(template.render(**render_ctx)) + + if executable: + set_executable_bit(outfile) + + +def get_tmpl_path(template_name: str) -> str: + """ Returns a path to a template. + Args: + template_name: Template file name. + """ + + return os.path.join(TEMPLATES_PATH, template_name) + + +def set_executable_bit(filepath): + """ Sets +x on filepath. """ + + st = os.stat(filepath) + os.chmod(filepath, st.st_mode | stat.S_IEXEC) + + +def run_command(command: str): + """ Run arbitrary bash command. + Args: + command: String command to execute as bash script + Raises: + PackageManagerError: Raised when the command return code + is not 0. + """ + + log.debug(f'running command: {command}') + + proc = subprocess.Popen(command, + shell=True, + executable='/bin/bash', + stdout=subprocess.PIPE) + (out, _) = proc.communicate() + if proc.returncode != 0: + raise ServiceCreatorError(f'Failed to execute "{command}"') + + +class ServiceCreator: + """ Creates and registers services in SONiC based on the package + manifest. 
""" + + def __init__(self, feature_registry: FeatureRegistry, sonic_db): + self.feature_registry = feature_registry + self.sonic_db = sonic_db + + def create(self, + package: Package, + register_feature=True, + state='enabled', + owner='local'): + try: + self.generate_container_mgmt(package) + self.generate_service_mgmt(package) + self.update_dependent_list_file(package) + self.generate_systemd_service(package) + self.generate_dump_script(package) + + self.set_initial_config(package) + + self.post_operation_hook() + + if register_feature: + self.feature_registry.register(package.manifest, + state, owner) + except (Exception, KeyboardInterrupt): + self.remove(package, register_feature) + raise + + def remove(self, package: Package, deregister_feature=True): + name = package.manifest['service']['name'] + + def remove_file(path): + if os.path.exists(path): + os.remove(path) + log.info(f'removed {path}') + + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) + remove_file(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) + + self.update_dependent_list_file(package, remove=True) + + self.post_operation_hook() + + if deregister_feature: + self.feature_registry.deregister(package.manifest['service']['name']) + self.remove_config(package) + + def post_operation_hook(self): + if not in_chroot(): + run_command('systemctl daemon-reload') + + def generate_container_mgmt(self, package: Package): + image_id = package.image_id + name = package.manifest['service']['name'] + container_spec = package.manifest['container'] + script_path = os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh') + script_template = get_tmpl_path(DOCKER_CTL_SCRIPT_TEMPLATE) + run_opt = [] + + if container_spec['privileged']: + run_opt.append('--privileged') + + run_opt.append('-t') + + for volume in container_spec['volumes']: + run_opt.append(f'-v {volume}') + + for mount in container_spec['mounts']: + mount_type, source, target = mount['type'], mount['source'], mount['target'] + run_opt.append(f'--mount type={mount_type},source={source},target={target}') + + for tmpfs_mount in container_spec['tmpfs']: + run_opt.append(f'--tmpfs {tmpfs_mount}') + + for env_name, value in container_spec['environment'].items(): + run_opt.append(f'-e {env_name}={value}') + + run_opt = ' '.join(run_opt) + render_ctx = { + 'docker_container_name': name, + 'docker_image_id': image_id, + 'docker_image_run_opt': run_opt, + } + render_template(script_template, script_path, render_ctx, executable=True) + log.info(f'generated {script_path}') + + def generate_service_mgmt(self, package: Package): + name = package.manifest['service']['name'] + multi_instance_services = self.feature_registry.get_multi_instance_features() + script_path = os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh') + scrip_template = get_tmpl_path(SERVICE_MGMT_SCRIPT_TEMPLATE) + render_ctx = { + 'source': get_tmpl_path(SERVICE_MGMT_SCRIPT_TEMPLATE), + 'manifest': package.manifest.unmarshal(), + 'multi_instance_services': multi_instance_services, + } + render_template(scrip_template, script_path, render_ctx, executable=True) + log.info(f'generated {script_path}') + + def generate_systemd_service(self, package: Package): + name = package.manifest['service']['name'] + multi_instance_services = self.feature_registry.get_multi_instance_features() + + template = 
get_tmpl_path(SERVICE_FILE_TEMPLATE) + template_vars = { + 'source': get_tmpl_path(SERVICE_FILE_TEMPLATE), + 'manifest': package.manifest.unmarshal(), + 'multi_instance': False, + 'multi_instance_services': multi_instance_services, + } + output_file = os.path.join(SYSTEMD_LOCATION, f'{name}.service') + render_template(template, output_file, template_vars) + log.info(f'generated {output_file}') + + if package.manifest['service']['asic-service']: + output_file = os.path.join(SYSTEMD_LOCATION, f'{name}@.service') + template_vars['multi_instance'] = True + render_template(template, output_file, template_vars) + log.info(f'generated {output_file}') + + if package.manifest['service']['delayed']: + template_vars = { + 'source': get_tmpl_path(TIMER_UNIT_TEMPLATE), + 'manifest': package.manifest.unmarshal(), + 'multi_instance': False, + } + output_file = os.path.join(SYSTEMD_LOCATION, f'{name}.timer') + template = os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE) + render_template(template, output_file, template_vars) + log.info(f'generated {output_file}') + + if package.manifest['service']['asic-service']: + output_file = os.path.join(SYSTEMD_LOCATION, f'{name}@.timer') + template_vars['multi_instance'] = True + render_template(template, output_file, template_vars) + log.info(f'generated {output_file}') + + def update_dependent_list_file(self, package: Package, remove=False): + name = package.manifest['service']['name'] + dependent_of = package.manifest['service']['dependent-of'] + host_service = package.manifest['service']['host-service'] + asic_service = package.manifest['service']['asic-service'] + + def update_dependent(service, name, multi_inst): + if multi_inst: + filename = f'{service}_multi_inst_dependent' + else: + filename = f'{service}_dependent' + + filepath = os.path.join(ETC_SONIC_PATH, filename) + + dependent_services = set() + if os.path.exists(filepath): + with open(filepath) as fp: + dependent_services.update({line.strip() for line in fp.readlines()}) + if remove: + with contextlib.suppress(KeyError): + dependent_services.remove(name) + else: + dependent_services.add(name) + with open(filepath, 'w') as fp: + fp.write('\n'.join(dependent_services)) + + for service in dependent_of: + if host_service: + update_dependent(service, name, multi_inst=False) + if asic_service: + update_dependent(service, name, multi_inst=True) + + def generate_dump_script(self, package): + name = package.manifest['service']['name'] + + if not package.manifest['package']['debug-dump']: + return + + if not os.path.exists(DEBUG_DUMP_SCRIPT_LOCATION): + os.mkdir(DEBUG_DUMP_SCRIPT_LOCATION) + + scrip_template = os.path.join(TEMPLATES_PATH, DEBUG_DUMP_SCRIPT_TEMPLATE) + script_path = os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}') + render_ctx = { + 'source': get_tmpl_path(SERVICE_MGMT_SCRIPT_TEMPLATE), + 'manifest': package.manifest.unmarshal(), + } + render_template(scrip_template, script_path, render_ctx, executable=True) + log.info(f'generated {script_path}') + + def get_tables(self, table_name): + tables = [] + + running_table = self.sonic_db.running_table(table_name) + if running_table is not None: + tables.append(running_table) + + persistent_table = self.sonic_db.persistent_table(table_name) + if persistent_table is not None: + tables.append(persistent_table) + + initial_table = self.sonic_db.initial_table(table_name) + if initial_table is not None: + tables.append(initial_table) + + return tables + + def set_initial_config(self, package): + init_cfg = package.manifest['package']['init-cfg'] + + for 
+            if not isinstance(content, dict):
+                continue
+
+            tables = self.get_tables(tablename)
+
+            for key in content:
+                for table in tables:
+                    cfg = content[key]
+                    exists, old_fvs = table.get(key)
+                    if exists:
+                        cfg.update(old_fvs)
+                    fvs = list(cfg.items())
+                    table.set(key, fvs)
+
+    def remove_config(self, package):
+        # Configuration is removed based on the init-cfg tables, so it
+        # may be a good idea to list tables in init-cfg even when they
+        # carry no keys.
+        # TODO: init-cfg should be validated with a YANG model
+        # TODO: remove config from tables known to the YANG model
+        init_cfg = package.manifest['package']['init-cfg']
+
+        for tablename, content in init_cfg.items():
+            if not isinstance(content, dict):
+                continue
+
+            tables = self.get_tables(tablename)
+
+            for key in content:
+                for table in tables:
+                    table._del(key)
diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py
new file mode 100644
index 0000000000..4df06384d2
--- /dev/null
+++ b/sonic_package_manager/service_creator/feature.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+""" This module implements new feature registration/de-registration in the SONiC system. """
+
+from typing import Dict, Type
+
+from sonic_package_manager.manifest import Manifest
+from sonic_package_manager.service_creator.sonic_db import SonicDB
+
+FEATURE = 'FEATURE'
+DEFAULT_FEATURE_CONFIG = {
+    'state': 'disabled',
+    'auto_restart': 'enabled',
+    'high_mem_alert': 'disabled',
+    'set_owner': 'local'
+}
+
+
+class FeatureRegistry:
+    """ FeatureRegistry provides an interface to register
+    or de-register a new feature persistently. """
+
+    def __init__(self, sonic_db: Type[SonicDB]):
+        self._sonic_db = sonic_db
+
+    def register(self,
+                 manifest: Manifest,
+                 state: str = 'disabled',
+                 owner: str = 'local'):
+        name = manifest['service']['name']
+        for table in self._get_tables():
+            cfg_entries = self.get_default_feature_entries(state, owner)
+            non_cfg_entries = self.get_non_configurable_feature_entries(manifest)
+
+            exists, current_cfg = table.get(name)
+
+            new_cfg = cfg_entries.copy()
+            # Override configurable entries with CONFIG DB data.
+            new_cfg = {**new_cfg, **dict(current_cfg)}
+            # Override CONFIG DB data with non-configurable entries.
+            new_cfg = {**new_cfg, **non_cfg_entries}
+
+            table.set(name, list(new_cfg.items()))
+
+    def deregister(self, name: str):
+        for table in self._get_tables():
+            table._del(name)
+
+    def is_feature_enabled(self, name: str) -> bool:
+        """ Returns whether the feature is currently enabled.
+        Accesses the running CONFIG DB; returns False if no
+        running CONFIG DB table is available. """
+
+        running_db_table = self._sonic_db.running_table(FEATURE)
+        if running_db_table is None:
+            return False
+
+        exists, cfg = running_db_table.get(name)
+        if not exists:
+            return False
+        cfg = dict(cfg)
+        return cfg.get('state', '').lower() == 'enabled'
+
+    def get_multi_instance_features(self):
+        res = []
+        init_db_table = self._sonic_db.initial_table(FEATURE)
+        for feature in init_db_table.keys():
+            exists, cfg = init_db_table.get(feature)
+            assert exists
+            cfg = dict(cfg)
+            asic_flag = str(cfg.get('has_per_asic_scope', 'False'))
+            if asic_flag.lower() == 'true':
+                res.append(feature)
+        return res
+
+    @staticmethod
+    def get_default_feature_entries(state=None, owner=None) -> Dict[str, str]:
+        """ Get configurable feature table entries:
+        e.g. 'state', 'auto_restart', etc.
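+
+        For example, with the default arguments the returned dictionary is:
+
+            {'state': 'disabled', 'auto_restart': 'enabled',
+             'high_mem_alert': 'disabled', 'set_owner': 'local'}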
""" + + cfg = DEFAULT_FEATURE_CONFIG.copy() + if state: + cfg['state'] = state + if owner: + cfg['set_owner'] = owner + return cfg + + @staticmethod + def get_non_configurable_feature_entries(manifest) -> Dict[str, str]: + """ Get non-configurable feature table entries: e.g. 'has_timer' """ + + return { + 'has_per_asic_scope': str(manifest['service']['asic-service']), + 'has_global_scope': str(manifest['service']['host-service']), + 'has_timer': str(manifest['service']['delayed']), + } + + def _get_tables(self): + tables = [] + running = self._sonic_db.running_table(FEATURE) + if running is not None: # it's Ok if there is no database container running + tables.append(running) + persistent = self._sonic_db.persistent_table(FEATURE) + if persistent is not None: # it's Ok if there is no config_db.json + tables.append(persistent) + tables.append(self._sonic_db.initial_table(FEATURE)) # init_cfg.json is must + + return tables diff --git a/sonic_package_manager/service_creator/sonic_db.py b/sonic_package_manager/service_creator/sonic_db.py new file mode 100644 index 0000000000..a064c60c4a --- /dev/null +++ b/sonic_package_manager/service_creator/sonic_db.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python + +import contextlib +import json +import os + +from swsscommon import swsscommon + +from sonic_package_manager.service_creator import ETC_SONIC_PATH +from sonic_package_manager.service_creator.utils import in_chroot + +CONFIG_DB = 'CONFIG_DB' +CONFIG_DB_JSON = os.path.join(ETC_SONIC_PATH, 'config_db.json') +INIT_CFG_JSON = os.path.join(ETC_SONIC_PATH, 'init_cfg.json') + + +class FileDbTable: + """ swsscommon.Table adapter for persistent DBs. """ + + def __init__(self, file, table): + self._file = file + self._table = table + + def keys(self): + with open(self._file) as stream: + config = json.load(stream) + return config.get(self._table, {}).keys() + + def get(self, key): + with open(self._file) as stream: + config = json.load(stream) + + table = config.get(self._table, {}) + exists = key in table + fvs_dict = table.get(key, {}) + fvs = list(fvs_dict.items()) + return exists, fvs + + def set(self, key, fvs): + with open(self._file) as stream: + config = json.load(stream) + + table = config.setdefault(self._table, {}) + table.update({key: dict(fvs)}) + + with open(self._file, 'w') as stream: + json.dump(config, stream, indent=4) + + def _del(self, key): + with open(self._file) as stream: + config = json.load(stream) + + with contextlib.suppress(KeyError): + config[self._table].pop(key) + + with open(self._file, 'w') as stream: + json.dump(config, stream, indent=4) + + +class SonicDB: + """ Store different DB access objects for + running DB and also for persistent and initial + configs. """ + + _running = None + + @classmethod + def running_table(cls, table): + """ Returns running DB table. """ + + # In chroot we can connect to a running + # DB via TCP socket, we should ignore this case. + if in_chroot(): + return None + + if cls._running is None: + try: + cls._running = swsscommon.DBConnector(CONFIG_DB, 0) + except RuntimeError: + # Failed to connect to DB. + return None + + return swsscommon.Table(cls._running, table) + + @classmethod + def persistent_table(cls, table): + """ Returns persistent DB table. """ + + if not os.path.exists(CONFIG_DB_JSON): + return None + + return FileDbTable(CONFIG_DB_JSON, table) + + @classmethod + def initial_table(cls, table): + """ Returns initial DB table. 
""" + + return FileDbTable(INIT_CFG_JSON, table) diff --git a/sonic_package_manager/service_creator/utils.py b/sonic_package_manager/service_creator/utils.py new file mode 100644 index 0000000000..cdeeb17abb --- /dev/null +++ b/sonic_package_manager/service_creator/utils.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +import os + + +def in_chroot() -> bool: + """ Verify if we are running in chroot or not + by comparing root / mount point device id and inode + with init process - /proc/1/root mount point device + id and inode. If those match we are not chroot-ed + otherwise we are. """ + + root_stat = os.stat('/') + init_root_stat = os.stat('/proc/1/root') + + return (root_stat.st_dev, root_stat.st_ino) != \ + (init_root_stat.st_dev, init_root_stat.st_ino) diff --git a/sonic_package_manager/source.py b/sonic_package_manager/source.py new file mode 100644 index 0000000000..c179e0b3ee --- /dev/null +++ b/sonic_package_manager/source.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 + +from sonic_package_manager.database import PackageDatabase, PackageEntry +from sonic_package_manager.dockerapi import DockerApi, get_repository_from_image +from sonic_package_manager.metadata import Metadata, MetadataResolver +from sonic_package_manager.package import Package + + +class PackageSource(object): + """ PackageSource abstracts the way manifest is read + and image is retrieved based on different image sources. + (i.e from registry, from tarball or locally installed) """ + + def __init__(self, + database: PackageDatabase, + docker: DockerApi, + metadata_resolver: MetadataResolver): + self.database = database + self.docker = docker + self.metadata_resolver = metadata_resolver + + def get_metadata(self) -> Metadata: + """ Returns package manifest. + Child class has to implement this method. + + Returns: + Metadata + """ + raise NotImplementedError + + def install_image(self, package: Package): + """ Install image based on package source. + Child class has to implement this method. + + Args: + package: SONiC Package + Returns: + Docker Image object. + """ + + raise NotImplementedError + + def install(self, package: Package): + """ Install image based on package source, + record installation infromation in PackageEntry.. + + Args: + package: SONiC Package + """ + + image = self.install_image(package) + package.entry.image_id = image.id + # if no repository is defined for this package + # get repository from image + if not package.repository: + package.entry.repository = get_repository_from_image(image) + + def uninstall(self, package: Package): + """ Uninstall image. + + Args: + package: SONiC Package + """ + + self.docker.rmi(package.image_id) + package.entry.image_id = None + + def get_package(self) -> Package: + """ Returns SONiC Package based on manifest. + + Returns: + SONiC Package + """ + + metadata = self.get_metadata() + manifest = metadata.manifest + + name = manifest['package']['name'] + description = manifest['package']['description'] + + # Will be resolved in install() method. + # When installing from tarball we don't know yet + # the repository for this package. + repository = None + + if self.database.has_package(name): + # inherit package database info + package_entry = self.database.get_package(name) + else: + package_entry = PackageEntry(name, repository, + description=description) + + return Package( + package_entry, + metadata + ) + + +class TarballSource(PackageSource): + """ TarballSource implements PackageSource + for locally existing image saved as tarball. 
""" + + def __init__(self, + tarball_path: str, + database: PackageDatabase, + docker: DockerApi, + metadata_resolver: MetadataResolver): + super().__init__(database, + docker, + metadata_resolver) + self.tarball_path = tarball_path + + def get_metadata(self) -> Metadata: + """ Returns manifest read from tarball. """ + + return self.metadata_resolver.from_tarball(self.tarball_path) + + def install_image(self, package: Package): + """ Installs image from local tarball source. """ + + return self.docker.load(self.tarball_path) + + +class RegistrySource(PackageSource): + """ RegistrySource implements PackageSource + for packages that are pulled from registry. """ + + def __init__(self, + repository: str, + reference: str, + database: PackageDatabase, + docker: DockerApi, + metadata_resolver: MetadataResolver): + super().__init__(database, + docker, + metadata_resolver) + self.repository = repository + self.reference = reference + + def get_metadata(self) -> Metadata: + """ Returns manifest read from registry. """ + + return self.metadata_resolver.from_registry(self.repository, + self.reference) + + def install_image(self, package: Package): + """ Installs image from registry. """ + + image_id = self.docker.pull(self.repository, self.reference) + if not package.entry.default_reference: + package.entry.default_reference = self.reference + return image_id + + +class LocalSource(PackageSource): + """ LocalSource accesses local docker library to retrieve manifest + but does not implement installation of the image. """ + + def __init__(self, + entry: PackageEntry, + database: PackageDatabase, + docker: DockerApi, + metadata_resolver: MetadataResolver): + super().__init__(database, + docker, + metadata_resolver) + self.entry = entry + + def get_metadata(self) -> Metadata: + """ Returns manifest read from locally installed Docker. """ + + image = self.entry.image_id + + if self.entry.built_in: + # Built-in (installed not via sonic-package-manager) + # won't have image_id in database. Using their + # repository name as image. + image = f'{self.entry.repository}:latest' + + return self.metadata_resolver.from_local(image) + + def get_package(self) -> Package: + return Package(self.entry, self.get_metadata()) diff --git a/sonic_package_manager/utils.py b/sonic_package_manager/utils.py new file mode 100644 index 0000000000..410947dd24 --- /dev/null +++ b/sonic_package_manager/utils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python + +import keyword +import re + +from docker_image.reference import Reference + +DockerReference = Reference + + +def make_python_identifier(string): + """ + Takes an arbitrary string and creates a valid Python identifier. 
+
+    Identifiers must follow the convention outlined here:
+    https://docs.python.org/2/reference/lexical_analysis.html#identifiers
+    """
+
+    # create a working copy (and make it lowercase, while we're at it)
+    s = string.lower()
+
+    # remove leading and trailing whitespace
+    s = s.strip()
+
+    # Make spaces into underscores
+    s = re.sub('[\\s\\t\\n]+', '_', s)
+
+    # Remove invalid characters
+    s = re.sub('[^0-9a-zA-Z_]', '', s)
+
+    # Remove leading characters until we find a letter or underscore
+    s = re.sub('^[^a-zA-Z_]+', '', s)
+
+    # Ensure the result is not a Python keyword
+    while s in keyword.kwlist:
+        if re.match(r".*?_\d+$", s):
+            i = re.match(r".*?_(\d+)$", s).groups()[0]
+            s = s.strip('_' + i) + '_' + str(int(i) + 1)
+        else:
+            s += '_1'
+
+    return s
diff --git a/sonic_package_manager/version.py b/sonic_package_manager/version.py
new file mode 100644
index 0000000000..e5a5623d3b
--- /dev/null
+++ b/sonic_package_manager/version.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+""" Version and helper routines. """
+
+import semver
+
+Version = semver.Version
+VersionRange = semver.VersionRange
+
+
+def version_to_tag(ver: Version) -> str:
+    """ Converts the version to a Docker compliant tag string. """
+
+    return str(ver).replace('+', '_')
+
+
+def tag_to_version(tag: str) -> Version:
+    """ Converts a Docker tag string back to a version. """
+
+    try:
+        return Version.parse(tag.replace('_', '+'))
+    except ValueError as err:
+        raise ValueError(f'Failed to convert {tag} to version string: {err}')
diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py
new file mode 100644
index 0000000000..cee997596c
--- /dev/null
+++ b/tests/sonic_package_manager/conftest.py
@@ -0,0 +1,377 @@
+#!/usr/bin/env python
+
+from dataclasses import dataclass
+from unittest import mock
+from unittest.mock import Mock, MagicMock
+
+import pytest
+from docker_image.reference import Reference
+
+from sonic_package_manager.database import PackageDatabase, PackageEntry
+from sonic_package_manager.manager import DockerApi, PackageManager
+from sonic_package_manager.manifest import Manifest
+from sonic_package_manager.metadata import Metadata, MetadataResolver
+from sonic_package_manager.registry import RegistryResolver
+from sonic_package_manager.version import Version
+from sonic_package_manager.service_creator.creator import *
+
+
+@pytest.fixture
+def mock_docker_api():
+    docker = MagicMock(DockerApi)
+
+    @dataclass
+    class Image:
+        id: str
+
+        @property
+        def attrs(self):
+            return {'RepoTags': [self.id]}
+
+    def pull(repo, ref):
+        return Image(f'{repo}:{ref}')
+
+    def load(filename):
+        return Image(filename)
+
+    docker.pull = MagicMock(side_effect=pull)
+    docker.load = MagicMock(side_effect=load)
+
+    yield docker
+
+
+@pytest.fixture
+def mock_registry_resolver():
+    yield Mock(RegistryResolver)
+
+
+@pytest.fixture
+def mock_metadata_resolver():
+    yield Mock(MetadataResolver)
+
+
+@pytest.fixture
+def mock_feature_registry():
+    yield MagicMock()
+
+
+@pytest.fixture
+def mock_service_creator():
+    yield Mock()
+
+
+@pytest.fixture
+def mock_sonic_db():
+    yield Mock()
+
+
+@pytest.fixture
+def fake_metadata_resolver():
+    class FakeMetadataResolver:
+        def __init__(self):
+            self.metadata_store = {}
+            self.add('docker-database', 'latest', 'database', '1.0.0')
+            self.add('docker-orchagent', 'latest', 'swss', '1.0.0',
+                     components={
+                         'libswsscommon': Version.parse('1.0.0'),
+                         'libsairedis': Version.parse('1.0.0')
+                     }
+                     )
+            self.add('Azure/docker-test', '1.6.0', 'test-package', '1.6.0')
+            self.add('Azure/docker-test-2', '1.5.0', 'test-package-2', '1.5.0')
+            self.add('Azure/docker-test-2', '2.0.0', 'test-package-2', '2.0.0')
+            self.add('Azure/docker-test-3', 'latest', 'test-package-3', '1.6.0')
+            self.add('Azure/docker-test-3', '1.5.0', 'test-package-3', '1.5.0')
+            self.add('Azure/docker-test-3', '1.6.0', 'test-package-3', '1.6.0')
+            self.add('Azure/docker-test-4', '1.5.0', 'test-package-4', '1.5.0')
+            self.add('Azure/docker-test-5', '1.5.0', 'test-package-5', '1.5.0')
+            self.add('Azure/docker-test-5', '1.9.0', 'test-package-5', '1.9.0')
+            self.add('Azure/docker-test-6', '1.5.0', 'test-package-6', '1.5.0')
+            self.add('Azure/docker-test-6', '1.9.0', 'test-package-6', '1.9.0')
+            self.add('Azure/docker-test-6', '2.0.0', 'test-package-6', '2.0.0')
+            self.add('Azure/docker-test-6', 'latest', 'test-package-6', '1.5.0')
+
+        def from_registry(self, repository: str, reference: str):
+            manifest = Manifest.marshal(self.metadata_store[repository][reference]['manifest'])
+            components = self.metadata_store[repository][reference]['components']
+            return Metadata(manifest, components)
+
+        def from_local(self, image: str):
+            ref = Reference.parse(image)
+            manifest = Manifest.marshal(self.metadata_store[ref['name']][ref['tag']]['manifest'])
+            components = self.metadata_store[ref['name']][ref['tag']]['components']
+            return Metadata(manifest, components)
+
+        def from_tarball(self, filepath: str) -> Metadata:
+            path, ref = filepath.split(':')
+            manifest = Manifest.marshal(self.metadata_store[path][ref]['manifest'])
+            components = self.metadata_store[path][ref]['components']
+            return Metadata(manifest, components)
+
+        def add(self, repo, reference, name, version, components=None):
+            repo_dict = self.metadata_store.setdefault(repo, {})
+            repo_dict[reference] = {
+                'manifest': {
+                    'package': {
+                        'version': version,
+                        'name': name,
+                        'base-os': {},
+                    },
+                    'service': {
+                        'name': name,
+                    }
+                },
+                'components': components or {},
+            }
+
+    yield FakeMetadataResolver()
+
+
+@pytest.fixture
+def fake_device_info():
+    class FakeDeviceInfo:
+        def __init__(self):
+            self.multi_npu = True
+            self.num_npus = 1
+            self.version_info = {
+                'libswsscommon': '1.0.0',
+            }
+
+        def is_multi_npu(self):
+            return self.multi_npu
+
+        def get_num_npus(self):
+            return self.num_npus
+
+        def get_sonic_version_info(self):
+            return self.version_info
+
+    yield FakeDeviceInfo()
+
+
+def add_package(content, metadata_resolver, repository, reference, **kwargs):
+    metadata = metadata_resolver.from_registry(repository, reference)
+    name = metadata.manifest['package']['name']
+    version = metadata.manifest['package']['version']
+    installed = kwargs.get('installed', False)
+    built_in = kwargs.get('built-in', False)
+
+    if installed and not built_in and 'image_id' not in kwargs:
+        kwargs['image_id'] = f'{repository}:{reference}'
+
+    if installed and 'version' not in kwargs:
+        kwargs['version'] = version
+
+    content[name] = PackageEntry(name, repository, **kwargs)
+
+
+@pytest.fixture
+def fake_db(fake_metadata_resolver):
+    content = {}
+
+    add_package(
+        content,
+        fake_metadata_resolver,
+        'docker-database',
+        'latest',
+        description='SONiC database service',
+        default_reference='1.0.0',
+        installed=True,
+        built_in=True
+    )
+    add_package(
+        content,
+        fake_metadata_resolver,
+        'docker-orchagent',
+        'latest',
+        description='SONiC switch state service',
+        default_reference='1.0.0',
+        installed=True,
+        built_in=True
+    )
+    add_package(
+        content,
+        fake_metadata_resolver,
+        'Azure/docker-test',
+        '1.6.0',
+        description='SONiC Package
Manager Test Package', + default_reference='1.6.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-2', + '1.5.0', + description='SONiC Package Manager Test Package #2', + default_reference='1.5.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-3', + '1.5.0', + description='SONiC Package Manager Test Package #3', + default_reference='1.5.0', + installed=True, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-5', + '1.9.0', + description='SONiC Package Manager Test Package #5', + default_reference='1.9.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-6', + '1.5.0', + description='SONiC Package Manager Test Package #6', + default_reference='1.5.0', + installed=False, + built_in=False + ) + + yield PackageDatabase(content) + + +@pytest.fixture +def fake_db_for_migration(fake_metadata_resolver): + content = {} + add_package( + content, + fake_metadata_resolver, + 'docker-database', + 'latest', + description='SONiC database service', + default_reference='1.0.0', + installed=True, + built_in=True + ) + add_package( + content, + fake_metadata_resolver, + 'docker-orchagent', + 'latest', + description='SONiC switch state service', + default_reference='1.0.0', + installed=True, + built_in=True + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test', + '1.6.0', + description='SONiC Package Manager Test Package', + default_reference='1.6.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-2', + '2.0.0', + description='SONiC Package Manager Test Package #2', + default_reference='2.0.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-3', + '1.6.0', + description='SONiC Package Manager Test Package #3', + default_reference='1.6.0', + installed=True, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-4', + '1.5.0', + description='SONiC Package Manager Test Package #4', + default_reference='1.5.0', + installed=True, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-5', + '1.5.0', + description='SONiC Package Manager Test Package #5', + default_reference='1.5.0', + installed=True, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-6', + '2.0.0', + description='SONiC Package Manager Test Package #6', + default_reference='2.0.0', + installed=True, + built_in=False + ) + + yield PackageDatabase(content) + + +@pytest.fixture() +def sonic_fs(fs): + fs.create_file('/proc/1/root') + fs.create_dir(ETC_SONIC_PATH) + fs.create_dir(SYSTEMD_LOCATION) + fs.create_dir(DOCKER_CTL_SCRIPT_LOCATION) + fs.create_dir(SERVICE_MGMT_SCRIPT_LOCATION) + fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_FILE_TEMPLATE)) + fs.create_file(os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE)) + fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_MGMT_SCRIPT_TEMPLATE)) + fs.create_file(os.path.join(TEMPLATES_PATH, DOCKER_CTL_SCRIPT_TEMPLATE)) + fs.create_file(os.path.join(TEMPLATES_PATH, DEBUG_DUMP_SCRIPT_TEMPLATE)) + yield fs + + +@pytest.fixture(autouse=True) +def patch_pkgutil(): + with mock.patch('pkgutil.get_loader'): + yield + + +@pytest.fixture +def package_manager(mock_docker_api, + mock_registry_resolver, + 
mock_service_creator, + fake_metadata_resolver, + fake_db, + fake_device_info): + yield PackageManager(mock_docker_api, mock_registry_resolver, + fake_db, fake_metadata_resolver, + mock_service_creator, + fake_device_info, + MagicMock()) + + +@pytest.fixture +def anything(): + """ Fixture that returns Any object that can be used in + assert_called_*_with to match any object passed. """ + + class Any: + def __eq__(self, other): + return True + + yield Any() diff --git a/tests/sonic_package_manager/test_cli.py b/tests/sonic_package_manager/test_cli.py new file mode 100644 index 0000000000..695d8cba58 --- /dev/null +++ b/tests/sonic_package_manager/test_cli.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +from click.testing import CliRunner + +from sonic_package_manager import main + + +def test_show_changelog(package_manager, fake_metadata_resolver): + """ Test case for "sonic-package-manager package show changelog [NAME]" """ + + runner = CliRunner() + changelog = { + "1.0.0": { + "changes": ["Initial release"], + "author": "Stepan Blyshchak", + "email": "stepanb@nvidia.com", + "date": "Mon, 25 May 2020 12:24:30 +0300" + }, + "1.1.0": { + "changes": [ + "Added functionality", + "Bug fixes" + ], + "author": "Stepan Blyshchak", + "email": "stepanb@nvidia.com", + "date": "Fri, 23 Oct 2020 12:26:08 +0300" + } + } + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['changelog'] = changelog + + expected_output = """\ +1.0.0: + + • Initial release + + Stepan Blyshchak (stepanb@nvidia.com) Mon, 25 May 2020 12:24:30 +0300 + +1.1.0: + + • Added functionality + • Bug fixes + + Stepan Blyshchak (stepanb@nvidia.com) Fri, 23 Oct 2020 12:26:08 +0300 + +""" + + result = runner.invoke(main.show.commands['package'].commands['changelog'], + ['test-package'], obj=package_manager) + + assert result.exit_code == 0 + assert result.output == expected_output + + +def test_show_changelog_no_changelog(package_manager): + """ Test case for "sonic-package-manager package show changelog [NAME]" + when there is no changelog provided by package. 
""" + + runner = CliRunner() + result = runner.invoke(main.show.commands['package'].commands['changelog'], ['test-package'], obj=package_manager) + + assert result.exit_code == 1 + assert result.output == 'Failed to print package changelog: No changelog for package test-package\n' diff --git a/tests/sonic_package_manager/test_constraint.py b/tests/sonic_package_manager/test_constraint.py new file mode 100644 index 0000000000..1b34a301d2 --- /dev/null +++ b/tests/sonic_package_manager/test_constraint.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python + +from sonic_package_manager import version +from sonic_package_manager.constraint import PackageConstraint +from sonic_package_manager.version import Version, VersionRange + + +def test_constraint(): + package_constraint = PackageConstraint.parse('swss>1.0.0') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('0.9.1')) + assert package_constraint.constraint.allows(Version.parse('1.1.1')) + + +def test_constraint_range(): + package_constraint = PackageConstraint.parse('swss^1.2.0') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('1.1.1')) + assert package_constraint.constraint.allows(Version.parse('1.2.5')) + assert not package_constraint.constraint.allows(Version.parse('2.0.1')) + + +def test_constraint_strict(): + package_constraint = PackageConstraint.parse('swss==1.2.0') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('1.1.1')) + assert package_constraint.constraint.allows(Version.parse('1.2.0')) + + +def test_constraint_match(): + package_constraint = PackageConstraint.parse('swss==1.2*.*') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('1.1.1')) + assert package_constraint.constraint.allows(Version.parse('1.2.0')) + + +def test_constraint_multiple(): + package_constraint = PackageConstraint.parse('swss>1.2.0,<3.0.0,!=2.2.2') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('2.2.2')) + assert not package_constraint.constraint.allows(Version.parse('3.2.0')) + assert not package_constraint.constraint.allows(Version.parse('0.2.0')) + assert package_constraint.constraint.allows(Version.parse('2.2.3')) + assert package_constraint.constraint.allows(Version.parse('1.2.3')) + + +def test_constraint_only_name(): + package_constraint = PackageConstraint.parse('swss') + assert package_constraint.name == 'swss' + assert package_constraint.constraint == VersionRange() + + +def test_constraint_from_dict(): + package_constraint = PackageConstraint.parse({ + 'name': 'swss', + 'version': '^1.0.0', + 'components': { + 'libswsscommon': '^1.1.0', + }, + }) + assert package_constraint.name == 'swss' + assert package_constraint.constraint.allows(Version.parse('1.0.0')) + assert not package_constraint.constraint.allows(Version.parse('2.0.0')) + assert package_constraint.components['libswsscommon'].allows(Version.parse('1.2.0')) + assert not package_constraint.components['libswsscommon'].allows(Version.parse('1.0.0')) + assert not package_constraint.components['libswsscommon'].allows(Version.parse('2.0.0')) + + +def test_version_to_tag(): + assert version.version_to_tag(Version.parse('1.0.0-rc0')) == '1.0.0-rc0' + assert version.version_to_tag(Version.parse('1.0.0-rc0+152')) == '1.0.0-rc0_152' + + +def test_tag_to_version(): + assert str(version.tag_to_version('1.0.0-rc0_152')) == 
'1.0.0-rc0+152' + assert str(version.tag_to_version('1.0.0-rc0')) == '1.0.0-rc0' diff --git a/tests/sonic_package_manager/test_database.py b/tests/sonic_package_manager/test_database.py new file mode 100644 index 0000000000..1c565d6f4c --- /dev/null +++ b/tests/sonic_package_manager/test_database.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python + +import pytest + +from sonic_package_manager.database import PackageEntry +from sonic_package_manager.errors import ( + PackageNotFoundError, + PackageAlreadyExistsError, + PackageManagerError +) +from sonic_package_manager.version import Version + + +def test_database_get_package(fake_db): + swss_package = fake_db.get_package('swss') + assert swss_package.installed + assert swss_package.built_in + assert swss_package.repository == 'docker-orchagent' + assert swss_package.default_reference == '1.0.0' + assert swss_package.version == Version(1, 0, 0) + + +def test_database_get_package_not_builtin(fake_db): + test_package = fake_db.get_package('test-package') + assert not test_package.installed + assert not test_package.built_in + assert test_package.repository == 'Azure/docker-test' + assert test_package.default_reference == '1.6.0' + assert test_package.version is None + + +def test_database_get_package_not_existing(fake_db): + with pytest.raises(PackageNotFoundError): + fake_db.get_package('abc') + + +def test_database_add_package(fake_db): + fake_db.add_package('test-package-99', 'Azure/docker-test-99') + test_package = fake_db.get_package('test-package-99') + assert not test_package.installed + assert not test_package.built_in + assert test_package.repository == 'Azure/docker-test-99' + assert test_package.default_reference is None + assert test_package.version is None + + +def test_database_add_package_existing(fake_db): + with pytest.raises(PackageAlreadyExistsError): + fake_db.add_package('swss', 'Azure/docker-orchagent') + + +def test_database_update_package(fake_db): + test_package = fake_db.get_package('test-package-2') + test_package.installed = True + test_package.version = Version(1, 2, 3) + fake_db.update_package(test_package) + test_package = fake_db.get_package('test-package-2') + assert test_package.installed + assert test_package.version == Version(1, 2, 3) + + +def test_database_update_package_non_existing(fake_db): + test_package = PackageEntry('abc', 'abc') + with pytest.raises(PackageNotFoundError): + fake_db.update_package(test_package) + + +def test_database_remove_package(fake_db): + fake_db.remove_package('test-package') + assert not fake_db.has_package('test-package') + + +def test_database_remove_package_non_existing(fake_db): + with pytest.raises(PackageNotFoundError): + fake_db.remove_package('non-existing-package') + + +def test_database_remove_package_installed(fake_db): + with pytest.raises(PackageManagerError, + match='Package test-package-3 is installed, ' + 'uninstall it first'): + fake_db.remove_package('test-package-3') + + +def test_database_remove_package_built_in(fake_db): + with pytest.raises(PackageManagerError, + match='Package swss is built-in, ' + 'cannot remove it'): + fake_db.remove_package('swss') diff --git a/tests/sonic_package_manager/test_manager.py b/tests/sonic_package_manager/test_manager.py new file mode 100644 index 0000000000..c7eb1ca7ac --- /dev/null +++ b/tests/sonic_package_manager/test_manager.py @@ -0,0 +1,322 @@ +#!/usr/bin/env python + +from unittest.mock import Mock, call + +import pytest + +from sonic_package_manager.errors import * +from sonic_package_manager.version import Version + + 
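+# A short summary of the package expression forms exercised by the tests
+# below (a reader's note; see manager.py for the authoritative parsing logic):
+#   'test-package'         - install the default reference
+#   'test-package=1.6.0'   - install a specific version
+#   'test-package@<ref>'   - install a specific reference (tag or digest)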
+def test_installation_not_installed(package_manager):
+    package_manager.install('test-package')
+    package = package_manager.get_installed_package('test-package')
+    assert package.installed
+    assert package.entry.default_reference == '1.6.0'
+
+
+def test_installation_already_installed(package_manager):
+    package_manager.install('test-package')
+    with pytest.raises(PackageManagerError,
+                       match='1.6.0 is already installed'):
+        package_manager.install('test-package')
+
+
+def test_installation_dependencies(package_manager, fake_metadata_resolver, mock_docker_api):
+    manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest']
+    manifest['package']['depends'] = ['swss^2.0.0']
+    with pytest.raises(PackageInstallationError,
+                       match='Package test-package requires swss>=2.0.0,<3.0.0 '
+                             'but version 1.0.0 is installed'):
+        package_manager.install('test-package')
+
+
+def test_installation_dependencies_missing_package(package_manager, fake_metadata_resolver):
+    manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest']
+    manifest['package']['depends'] = ['missing-package>=1.0.0']
+    with pytest.raises(PackageInstallationError,
+                       match='Package test-package requires '
+                             'missing-package>=1.0.0 but it is not installed'):
+        package_manager.install('test-package')
+
+
+def test_installation_dependencies_satisfied(package_manager, fake_metadata_resolver):
+    manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest']
+    manifest['package']['depends'] = ['database>=1.0.0', 'swss>=1.0.0']
+    package_manager.install('test-package')
+
+
+def test_installation_components_dependencies_satisfied(package_manager, fake_metadata_resolver):
+    metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']
+    manifest = metadata['manifest']
+    metadata['components'] = {
+        'libswsscommon': Version.parse('1.1.0')
+    }
+    manifest['package']['depends'] = [
+        {
+            'name': 'swss',
+            'version': '>=1.0.0',
+            'components': {
+                'libswsscommon': '^1.0.0',
+            },
+        },
+    ]
+    package_manager.install('test-package')
+
+
+def test_installation_components_dependencies_not_satisfied(package_manager, fake_metadata_resolver):
+    metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']
+    manifest = metadata['manifest']
+    metadata['components'] = {
+        'libswsscommon': Version.parse('1.1.0')
+    }
+    manifest['package']['depends'] = [
+        {
+            'name': 'swss',
+            'version': '>=1.0.0',
+            'components': {
+                'libswsscommon': '^1.1.0',
+            },
+        },
+    ]
+    with pytest.raises(PackageInstallationError,
+                       match='Package test-package requires libswsscommon >=1.1.0,<2.0.0 '
+                             'in package swss>=1.0.0 but version 1.0.0 is installed'):
+        package_manager.install('test-package')
+
+
+def test_installation_components_dependencies_implicit(package_manager, fake_metadata_resolver):
+    metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']
+    manifest = metadata['manifest']
+    metadata['components'] = {
+        'libswsscommon': Version.parse('2.1.0')
+    }
+    manifest['package']['depends'] = [
+        {
+            'name': 'swss',
+            'version': '>=1.0.0',
+        },
+    ]
+    with pytest.raises(PackageInstallationError,
+                       match='Package test-package requires libswsscommon >=2.1.0,<3.0.0 '
+                             'in package swss>=1.0.0 but version 1.0.0 is installed'):
+        package_manager.install('test-package')
+
+
+def test_installation_components_dependencies_explicitly_allowed(package_manager, fake_metadata_resolver):
+    metadata =
fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + manifest = metadata['manifest'] + metadata['components'] = { + 'libswsscommon': Version.parse('2.1.0') + } + manifest['package']['depends'] = [ + { + 'name': 'swss', + 'version': '>=1.0.0', + 'components': { + 'libswsscommon': '>=1.0.0,<3.0.0' + } + }, + ] + package_manager.install('test-package') + + +def test_installation_breaks(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['breaks'] = ['swss^1.0.0'] + with pytest.raises(PackageInstallationError, + match='Package test-package conflicts with ' + 'swss>=1.0.0,<2.0.0 but version 1.0.0 is installed'): + package_manager.install('test-package') + + +def test_installation_breaks_missing_package(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['breaks'] = ['missing-package^1.0.0'] + package_manager.install('test-package') + + +def test_installation_breaks_not_installed_package(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['breaks'] = ['test-package-2^1.0.0'] + package_manager.install('test-package') + + +def test_installation_base_os_constraint(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['base-os']['libswsscommon'] = '>=2.0.0' + with pytest.raises(PackageSonicRequirementError, + match='Package test-package requires base OS component libswsscommon ' + 'version >=2.0.0 while the installed version is 1.0.0'): + package_manager.install('test-package') + + +def test_installation_base_os_constraint_satisfied(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['base-os']['libswsscommon'] = '>=1.0.0' + package_manager.install('test-package') + + +def test_installation_cli_plugin(package_manager, fake_metadata_resolver, anything): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['cli']= {'show': '/cli/plugin.py'} + package_manager._install_cli_plugins = Mock() + package_manager.install('test-package') + package_manager._install_cli_plugins.assert_called_once_with(anything) + + +def test_installation_cli_plugin_skipped(package_manager, fake_metadata_resolver, anything): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['cli']= {'show': '/cli/plugin.py'} + package_manager._install_cli_plugins = Mock() + package_manager.install('test-package', skip_host_plugins=True) + package_manager._install_cli_plugins.assert_not_called() + + +def test_installation_cli_plugin_is_mandatory_but_skipped(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['cli']= {'mandatory': True} + with pytest.raises(PackageManagerError, + match='CLI is mandatory for package test-package but ' + 'it was requested to be not installed'): + package_manager.install('test-package', skip_host_plugins=True) + + +def test_installation(package_manager, mock_docker_api, anything): + package_manager.install('test-package') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', 
'1.6.0') + + +def test_installation_using_reference(package_manager, + fake_metadata_resolver, + mock_docker_api, + anything): + ref = 'sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + fake_metadata_resolver.metadata_store['Azure/docker-test'][ref] = metadata + + package_manager.install(f'test-package@{ref}') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', f'{ref}') + + +def test_manager_installation_tag(package_manager, + mock_docker_api, + anything): + package_manager.install(f'test-package=1.6.0') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', '1.6.0') + + +def test_installation_from_file(package_manager, mock_docker_api, sonic_fs): + sonic_fs.create_file('Azure/docker-test:1.6.0') + package_manager.install(tarball='Azure/docker-test:1.6.0') + mock_docker_api.load.assert_called_once_with('Azure/docker-test:1.6.0') + + +def test_installation_from_registry(package_manager, mock_docker_api): + package_manager.install(repotag='Azure/docker-test:1.6.0') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', '1.6.0') + + +def test_installation_from_registry_using_digest(package_manager, mock_docker_api, fake_metadata_resolver): + ref = 'sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + fake_metadata_resolver.metadata_store['Azure/docker-test'][ref] = metadata + + ref = 'sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + package_manager.install(repotag=f'Azure/docker-test@{ref}') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', ref) + + +def test_installation_from_file_known_package(package_manager, fake_db, sonic_fs): + repository = fake_db.get_package('test-package').repository + sonic_fs.create_file('Azure/docker-test:1.6.0') + package_manager.install(tarball='Azure/docker-test:1.6.0') + # locally installed package does not override already known package repository + assert repository == fake_db.get_package('test-package').repository + + +def test_installation_from_file_unknown_package(package_manager, fake_db, sonic_fs): + assert not fake_db.has_package('test-package-4') + sonic_fs.create_file('Azure/docker-test-4:1.5.0') + package_manager.install(tarball='Azure/docker-test-4:1.5.0') + assert fake_db.has_package('test-package-4') + + +def test_upgrade_from_file_known_package(package_manager, fake_db, sonic_fs): + repository = fake_db.get_package('test-package-6').repository + # install older version from repository + package_manager.install('test-package-6=1.5.0') + # upgrade from file + sonic_fs.create_file('Azure/docker-test-6:2.0.0') + package_manager.install(tarball='Azure/docker-test-6:2.0.0') + # locally installed package does not override already known package repository + assert repository == fake_db.get_package('test-package-6').repository + + +def test_installation_non_default_owner(package_manager, anything, mock_service_creator): + package_manager.install('test-package', default_owner='kube') + mock_service_creator.create.assert_called_once_with(anything, state='disabled', owner='kube') + + +def test_installation_enabled(package_manager, anything, mock_service_creator): + package_manager.install('test-package', enable=True) + mock_service_creator.create.assert_called_once_with(anything, state='enabled', owner='local') + + +def test_installation_fault(package_manager, 
mock_docker_api, mock_service_creator): + # make 'tag' to fail + mock_service_creator.create = Mock(side_effect=Exception('Failed to create service')) + # 'rmi' is called on rollback + mock_docker_api.rmi = Mock(side_effect=Exception('Failed to remove image')) + # assert that the rollback does not hide the original failure. + with pytest.raises(Exception, match='Failed to create service'): + package_manager.install('test-package') + mock_docker_api.rmi.assert_called_once() + + +def test_manager_installation_version_range(package_manager): + with pytest.raises(PackageManagerError, + match='Can only install specific version. ' + 'Use only following expression "test-package=" ' + 'to install specific version'): + package_manager.install(f'test-package>=1.6.0') + + +def test_manager_upgrade(package_manager, sonic_fs): + package_manager.install('test-package-6=1.5.0') + package = package_manager.get_installed_package('test-package-6') + + package_manager.install('test-package-6=2.0.0') + upgraded_package = package_manager.get_installed_package('test-package-6') + assert upgraded_package.entry.version == Version(2, 0, 0) + assert upgraded_package.entry.default_reference == package.entry.default_reference + + +def test_manager_package_reset(package_manager, sonic_fs): + package_manager.install('test-package-6=1.5.0') + package_manager.install('test-package-6=2.0.0') + + package_manager.reset('test-package-6') + upgraded_package = package_manager.get_installed_package('test-package-6') + assert upgraded_package.entry.version == Version(1, 5, 0) + + +def test_manager_migration(package_manager, fake_db_for_migration): + package_manager.install = Mock() + package_manager.migrate_packages(fake_db_for_migration) + + package_manager.install.assert_has_calls([ + # test-package-3 was installed but there is a newer version installed + # in fake_db_for_migration, asserting for upgrade + call('test-package-3=1.6.0'), + # test-package-4 was not present in DB at all, but it is present and installed in + # fake_db_for_migration, thus asserting that it is going to be installed. + call('test-package-4=1.5.0'), + # test-package-5 1.5.0 was installed in fake_db_for_migration but the default + # in current db is 1.9.0, assert that migration will install the newer version. + call('test-package-5=1.9.0'), + # test-package-6 2.0.0 was installed in fake_db_for_migration but the default + # in current db is 1.5.0, assert that migration will install the newer version. 
+ call('test-package-6=2.0.0')], + any_order=True + ) diff --git a/tests/sonic_package_manager/test_manifest.py b/tests/sonic_package_manager/test_manifest.py new file mode 100644 index 0000000000..efdcc558ab --- /dev/null +++ b/tests/sonic_package_manager/test_manifest.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python + +import pytest + +from sonic_package_manager.constraint import ComponentConstraints +from sonic_package_manager.manifest import Manifest, ManifestError +from sonic_package_manager.version import VersionRange + + +def test_manifest_v1_defaults(): + manifest = Manifest.marshal({'package': {'name': 'test', + 'version': '1.0.0'}, + 'service': {'name': 'test'}}) + assert manifest['package']['depends'] == [] + assert manifest['package']['breaks'] == [] + assert manifest['package']['base-os'] == ComponentConstraints() + assert not manifest['service']['asic-service'] + assert manifest['service']['host-service'] + + +def test_manifest_v1_invalid_version(): + with pytest.raises(ManifestError): + Manifest.marshal({'package': {'version': 'abc', 'name': 'test'}, + 'service': {'name': 'test'}}) + + +def test_manifest_v1_invalid_package_constraint(): + with pytest.raises(ManifestError): + Manifest.marshal({'package': {'name': 'test', 'version': '1.0.0', + 'depends': ['swss>a']}, + 'service': {'name': 'test'}}) + + +def test_manifest_v1_service_spec(): + manifest = Manifest.marshal({'package': {'name': 'test', + 'version': '1.0.0'}, + 'service': {'name': 'test', 'asic-service': True}}) + assert manifest['service']['asic-service'] + + +def test_manifest_v1_mounts(): + manifest = Manifest.marshal({'version': '1.0.0', 'package': {'name': 'test', + 'version': '1.0.0'}, + 'service': {'name': 'cpu-report'}, + 'container': {'privileged': True, + 'mounts': [{'source': 'a', 'target': 'b', 'type': 'bind'}]}}) + assert manifest['container']['mounts'][0]['source'] == 'a' + assert manifest['container']['mounts'][0]['target'] == 'b' + assert manifest['container']['mounts'][0]['type'] == 'bind' + + +def test_manifest_v1_mounts_invalid(): + with pytest.raises(ManifestError): + Manifest.marshal({'version': '1.0.0', 'package': {'name': 'test', 'version': '1.0.0'}, + 'service': {'name': 'cpu-report'}, + 'container': {'privileged': True, + 'mounts': [{'not-source': 'a', 'target': 'b', 'type': 'bind'}]}}) + + +def test_manifest_v1_unmarshal(): + manifest_json_input = {'package': {'name': 'test', 'version': '1.0.0', + 'depends': [ + { + 'name': 'swss', + 'version': '>1.0.0', + 'components': {}, + } + ]}, + 'service': {'name': 'test'}} + manifest = Manifest.marshal(manifest_json_input) + manifest_json = manifest.unmarshal() + for key, section in manifest_json_input.items(): + for field, value in section.items(): + assert manifest_json[key][field] == value diff --git a/tests/sonic_package_manager/test_metadata.py b/tests/sonic_package_manager/test_metadata.py new file mode 100644 index 0000000000..aee2f49428 --- /dev/null +++ b/tests/sonic_package_manager/test_metadata.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +import contextlib +from unittest.mock import Mock, MagicMock + +from sonic_package_manager.database import PackageEntry +from sonic_package_manager.errors import MetadataError +from sonic_package_manager.metadata import MetadataResolver +from sonic_package_manager.version import Version + + +def test_metadata_resolver_local(mock_registry_resolver, mock_docker_api): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # it raises exception because mock manifest is not a valid 
manifest + # but this is not a test objective, so just suppress the error. + with contextlib.suppress(MetadataError): + metadata_resolver.from_local('image') + mock_docker_api.labels.assert_called_once() + + +def test_metadata_resolver_remote(mock_registry_resolver, mock_docker_api): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + mock_registry = MagicMock() + mock_registry.manifest = MagicMock(return_value={'config': {'digest': 'some-digest'}}) + + def return_mock_registry(repository): + return mock_registry + + mock_registry_resolver.get_registry_for = Mock(side_effect=return_mock_registry) + # it raises exception because mock manifest is not a valid manifest + # but this is not a test objective, so just suppress the error. + with contextlib.suppress(MetadataError): + metadata_resolver.from_registry('test-repository', '1.2.0') + mock_registry_resolver.get_registry_for.assert_called_once_with('test-repository') + mock_registry.manifest.assert_called_once_with('test-repository', '1.2.0') + mock_registry.blobs.assert_called_once_with('test-repository', 'some-digest') + mock_docker_api.labels.assert_not_called() diff --git a/tests/sonic_package_manager/test_reference.py b/tests/sonic_package_manager/test_reference.py new file mode 100644 index 0000000000..043b66ddd5 --- /dev/null +++ b/tests/sonic_package_manager/test_reference.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +import pytest + +from sonic_package_manager.reference import PackageReference + + +def test_reference(): + package_constraint = PackageReference.parse( + 'swss@sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + ) + assert package_constraint.name == 'swss' + assert package_constraint.reference == 'sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + + +def test_reference_invalid(): + with pytest.raises(ValueError): + PackageReference.parse('swssfdsf') diff --git a/tests/sonic_package_manager/test_registry.py b/tests/sonic_package_manager/test_registry.py new file mode 100644 index 0000000000..0d82499df3 --- /dev/null +++ b/tests/sonic_package_manager/test_registry.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python + +from sonic_package_manager.registry import RegistryResolver + + +def test_get_registry_for(): + resolver = RegistryResolver() + registry = resolver.get_registry_for('debian') + assert registry is resolver.DockerHubRegistry + registry = resolver.get_registry_for('Azure/sonic') + assert registry is resolver.DockerHubRegistry + registry = resolver.get_registry_for('registry-server:5000/docker') + assert registry.url == 'https://registry-server:5000' + registry = resolver.get_registry_for('registry-server.com/docker') + assert registry.url == 'https://registry-server.com' diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py new file mode 100644 index 0000000000..fec8de600c --- /dev/null +++ b/tests/sonic_package_manager/test_service_creator.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +import os +from unittest.mock import Mock, MagicMock + +import pytest + +from sonic_package_manager.database import PackageEntry +from sonic_package_manager.manifest import Manifest +from sonic_package_manager.metadata import Metadata +from sonic_package_manager.package import Package +from sonic_package_manager.service_creator.creator import * +from sonic_package_manager.service_creator.feature import FeatureRegistry + + +@pytest.fixture +def manifest(): + return Manifest.marshal({ + 'package': { 
+ 'name': 'test', + 'version': '1.0.0', + }, + 'service': { + 'name': 'test', + 'requires': ['database'], + 'after': ['database', 'swss', 'syncd'], + 'before': ['ntp-config'], + 'dependent-of': ['swss'], + 'asic-service': False, + 'host-service': True, + }, + 'container': { + 'privileged': True, + 'volumes': [ + '/etc/sonic:/etc/sonic:ro' + ] + } + }) + + +def test_service_creator(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert sonic_fs.exists(os.path.join(ETC_SONIC_PATH, 'swss_dependent')) + assert sonic_fs.exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, 'test.sh')) + assert sonic_fs.exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, 'test.sh')) + assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.service')) + + +def test_service_creator_with_timer_unit(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert not sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) + + manifest['service']['delayed'] = True + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) + + +def test_service_creator_with_debug_dump(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert not sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) + + manifest['package']['debug-dump'] = '/some/command' + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) + + +def test_service_creator_initial_config(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + mock_table = Mock() + mock_table.get = Mock(return_value=(True, (('field_2', 'original_value_2'),))) + mock_sonic_db.initial_table = Mock(return_value=mock_table) + mock_sonic_db.persistent_table = Mock(return_value=mock_table) + mock_sonic_db.running_table = Mock(return_value=mock_table) + + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert not sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) + + manifest['package']['init-cfg'] = { + 'TABLE_A': { + 'key_a': { + 'field_1': 'value_1', + 'field_2': 'value_2' + }, + }, + } + package = Package(entry, Metadata(manifest)) + + creator.create(package) + mock_table.set.assert_called_with('key_a', [('field_1', 'value_1'), + ('field_2', 'original_value_2')]) + + creator.remove(package) + mock_table._del.assert_called_with('key_a') + + +def test_feature_registration(mock_sonic_db, manifest): + mock_feature_table = Mock() + mock_feature_table.get = Mock(return_value=(False, ())) + mock_sonic_db.initial_table = Mock(return_value=mock_feature_table) + mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table) + mock_sonic_db.running_table = Mock(return_value=mock_feature_table) + feature_registry = FeatureRegistry(mock_sonic_db) + 
feature_registry.register(manifest)
+    mock_feature_table.set.assert_called_with('test', [
+        ('state', 'disabled'),
+        ('auto_restart', 'enabled'),
+        ('high_mem_alert', 'disabled'),
+        ('set_owner', 'local'),
+        ('has_per_asic_scope', 'False'),
+        ('has_global_scope', 'True'),
+        ('has_timer', 'False'),
+    ])
+
+
+def test_feature_registration_with_timer(mock_sonic_db, manifest):
+    manifest['service']['delayed'] = True
+    mock_feature_table = Mock()
+    mock_feature_table.get = Mock(return_value=(False, ()))
+    mock_sonic_db.initial_table = Mock(return_value=mock_feature_table)
+    mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table)
+    mock_sonic_db.running_table = Mock(return_value=mock_feature_table)
+    feature_registry = FeatureRegistry(mock_sonic_db)
+    feature_registry.register(manifest)
+    mock_feature_table.set.assert_called_with('test', [
+        ('state', 'disabled'),
+        ('auto_restart', 'enabled'),
+        ('high_mem_alert', 'disabled'),
+        ('set_owner', 'local'),
+        ('has_per_asic_scope', 'False'),
+        ('has_global_scope', 'True'),
+        ('has_timer', 'True'),
+    ])
+
+
+def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest):
+    mock_feature_table = Mock()
+    mock_feature_table.get = Mock(return_value=(False, ()))
+    mock_sonic_db.initial_table = Mock(return_value=mock_feature_table)
+    mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table)
+    mock_sonic_db.running_table = Mock(return_value=mock_feature_table)
+    feature_registry = FeatureRegistry(mock_sonic_db)
+    feature_registry.register(manifest, owner='kube')
+    mock_feature_table.set.assert_called_with('test', [
+        ('state', 'disabled'),
+        ('auto_restart', 'enabled'),
+        ('high_mem_alert', 'disabled'),
+        ('set_owner', 'kube'),
+        ('has_per_asic_scope', 'False'),
+        ('has_global_scope', 'True'),
+        ('has_timer', 'False'),
+    ])
diff --git a/tests/sonic_package_manager/test_utils.py b/tests/sonic_package_manager/test_utils.py
new file mode 100644
index 0000000000..c4d8b15840
--- /dev/null
+++ b/tests/sonic_package_manager/test_utils.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+
+from sonic_package_manager import utils
+
+
+def test_make_python_identifier():
+    assert utils.make_python_identifier('-some-package name').isidentifier()
+    assert utils.make_python_identifier('01 leading digit').isidentifier()

From 2e09b2202283ff350a52ee5b0937240851cb298d Mon Sep 17 00:00:00 2001
From: Stephen Sun <5379172+stephenxs@users.noreply.github.com>
Date: Mon, 3 May 2021 23:57:39 +0800
Subject: [PATCH 2/9] Handle the new db version which mellanox_buffer_migrator
 isn't interested in (#1566)

Enhancement: handle the case where there is no buffer change in the latest
database version.

Currently, the following two versions are the same:
- The latest version changed by mellanox_buffer_migrator
- The latest version in CONFIG_DB

That won't be true if another part of CONFIG_DB is updated. In that case, the
latest version in CONFIG_DB will be greater than the latest version known to
mellanox_buffer_migrator. However, this can break the buffer migrator unit
test:
- The db_migrator will always migrate the database to the latest version
- The config database version check will fail if the latest version in the
  config database doesn't match the one defined in the buffer migrator.

This change supports that case.
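To illustrate the check described above, a minimal sketch (not part of the
diff below; the standalone function name here is hypothetical, while the real
test adds it as the advance_version_for_expected_database method). Versions
compare as plain strings, exactly as in the test:

    def advance_expected_version(expected_version, migrated_version, version_list):
        # Advance only when the expected DB already carries the newest version
        # known to the buffer migrator, yet the migrated DB reports a newer one.
        if expected_version == version_list[-1] and migrated_version > expected_version:
            return migrated_version
        return expected_version

    # Example: the buffer migrator knows versions up to 'version_2_0_3', but an
    # unrelated CONFIG_DB change bumped the database to 'version_2_0_4'.
    assert advance_expected_version(
        'version_2_0_3', 'version_2_0_4',
        ['version_1_0_4', 'version_2_0_3']) == 'version_2_0_4'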
Signed-off-by: Stephen Sun
---
 tests/db_migrator_test.py | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py
index 87cf2c8c11..bbff2a7666 100644
--- a/tests/db_migrator_test.py
+++ b/tests/db_migrator_test.py
@@ -76,6 +76,16 @@ def check_appl_db(self, result, expected):
         for key in keys:
             assert expected.get_all(expected.APPL_DB, key) == result.get_all(result.APPL_DB, key)

+    def advance_version_for_expected_database(self, migrated_db, expected_db):
+        # In case there are new db versions greater than the latest one that the mellanox buffer migrator is interested in,
+        # we just advance the database version in the expected database to make the test pass
+        expected_dbversion = expected_db.get_entry('VERSIONS', 'DATABASE')
+        dbmgtr_dbversion = migrated_db.get_entry('VERSIONS', 'DATABASE')
+        if expected_dbversion and dbmgtr_dbversion:
+            if expected_dbversion['VERSION'] == self.version_list[-1] and dbmgtr_dbversion['VERSION'] > expected_dbversion['VERSION']:
+                expected_dbversion['VERSION'] = dbmgtr_dbversion['VERSION']
+                expected_db.set_entry('VERSIONS', 'DATABASE', expected_dbversion)
+
     @pytest.mark.parametrize('scenario',
                              ['empty-config',
                               'non-default-config',
@@ -93,6 +103,7 @@ def test_mellanox_buffer_migrator_negative_cold_reboot(self, scenario):
         dbmgtr = db_migrator.DBMigrator(None)
         dbmgtr.migrate()
         expected_db = self.mock_dedicated_config_db(db_after_migrate)
+        self.advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb)
         self.check_config_db(dbmgtr.configDB, expected_db.cfgdb)
         assert not dbmgtr.mellanox_buffer_migrator.is_buffer_config_default

@@ -119,8 +130,6 @@ def test_mellanox_buffer_migrator_for_cold_reboot(self, sku_version, topo):
         sku, start_version = sku_version
         version = start_version
         start_index = self.version_list.index(start_version)
-        # Eventually, the config db should be migrated to the latest version
-        expected_db = self.mock_dedicated_config_db(self.make_db_name_by_sku_topo_version(sku, topo, self.version_list[-1]))

         # start_version represents the database version from which the SKU is supported
         # For each SKU,
@@ -130,6 +139,9 @@
         import db_migrator
         dbmgtr = db_migrator.DBMigrator(None)
         dbmgtr.migrate()
+        # Eventually, the config db should be migrated to the latest version
+        expected_db = self.mock_dedicated_config_db(self.make_db_name_by_sku_topo_version(sku, topo, self.version_list[-1]))
+        self.advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb)
         self.check_config_db(dbmgtr.configDB, expected_db.cfgdb)
         assert dbmgtr.mellanox_buffer_migrator.is_buffer_config_default

@@ -145,6 +157,7 @@ def mellanox_buffer_migrator_warm_reboot_runner(self, input_config_db, input_app
         import db_migrator
         dbmgtr = db_migrator.DBMigrator(None)
         dbmgtr.migrate()
+        self.advance_version_for_expected_database(dbmgtr.configDB, expected_config_db.cfgdb)
         assert dbmgtr.mellanox_buffer_migrator.is_buffer_config_default == is_buffer_config_default_expected
         self.check_config_db(dbmgtr.configDB, expected_config_db.cfgdb)
         self.check_appl_db(dbmgtr.appDB, expected_appl_db)
@@ -173,6 +186,7 @@ def test_mellanox_buffer_migrator_for_warm_reboot(self, sku, topo):
         self.mellanox_buffer_migrator_warm_reboot_runner(input_db_name, input_db_name, expected_db_name, expected_db_name, True)

     def test_mellanox_buffer_migrator_negative_nondefault_for_warm_reboot(self):
+        device_info.get_sonic_version_info = get_sonic_version_info_mlnx
         expected_config_db = 'non-default-config-expected'
         expected_appl_db = 'non-default-expected'
         input_config_db = 'non-default-config-input'

From 912076658883513214b769e2a670b7b2303cb67f Mon Sep 17 00:00:00 2001
From: Qi Luo
Date: Mon, 3 May 2021 09:03:49 -0700
Subject: [PATCH 3/9] Relax the install_requires: no need to pin exact versions
 as long as there are no breaking changes in future versions (#1530)

#### What I did
Fixes https://github.com/Azure/sonic-buildimage/issues/7152

#### How I did it
Relax the install_requires
---
 setup.py | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/setup.py b/setup.py
index 15f93b46f7..b8b6d07229 100644
--- a/setup.py
+++ b/setup.py
@@ -161,32 +161,32 @@
     },
     install_requires=[
         'click==7.0',
-        'click-log==0.3.2',
-        'docker==4.4.4',
-        'docker-image-py==0.1.10',
-        'filelock==3.0.12',
-        'enlighten==1.8.0',
-        'ipaddress==1.0.23',
-        'jinja2==2.11.3',
-        'jsondiff==1.2.0',
-        'jsonpatch==1.32.0',
-        'm2crypto==0.31.0',
-        'natsort==6.2.1',  # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2
-        'netaddr==0.8.0',
-        'netifaces==0.10.7',
-        'pexpect==4.8.0',
-        'poetry-semver==0.1.0',
-        'prettyprinter==0.18.0',
-        'pyroute2==0.5.14',
-        'requests==2.25.0',
+        'click-log>=0.3.2',
+        'docker>=4.4.4',
+        'docker-image-py>=0.1.10',
+        'filelock>=3.0.12',
+        'enlighten>=1.8.0',
+        'ipaddress>=1.0.23',
+        'jinja2>=2.11.3',
+        'jsondiff>=1.2.0',
+        'jsonpatch>=1.32.0',
+        'm2crypto>=0.31.0',
+        'natsort>=6.2.1',  # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2
+        'netaddr>=0.8.0',
+        'netifaces>=0.10.7',
+        'pexpect>=4.8.0',
+        'poetry-semver>=0.1.0',
+        'prettyprinter>=0.18.0',
+        'pyroute2>=0.5.14',
+        'requests>=2.25.0',
         'sonic-config-engine',
         'sonic-platform-common',
         'sonic-py-common',
         'sonic-yang-mgmt',
         'swsssdk>=2.0.1',
-        'tabulate==0.8.2',
-        'www-authenticate==0.9.2',
-        'xmltodict==0.12.0',
+        'tabulate>=0.8.2',
+        'www-authenticate>=0.9.2',
+        'xmltodict>=0.12.0',
     ],
     setup_requires= [
         'pytest-runner',

From cbe21599e7e1371c70370a147bca0d4be401c56f Mon Sep 17 00:00:00 2001
From: Volodymyr Samotiy
Date: Tue, 4 May 2021 05:20:34 +0300
Subject: [PATCH 4/9] [vnet] Add "vnet_route_check" script (#1300)

* [vnet] Add "vnet_route_check" script

* [vnet_route_check.py]: tool that verifies VNET route consistency between
  SONiC and vendor SDK DBs.

Signed-off-by: Volodymyr Samotiy
---
 scripts/vnet_route_check.py    | 363 +++++++++++++++++++++++++++++++++
 setup.py                       |   1 +
 tests/vnet_route_check_test.py | 325 +++++++++++++++++++++++++++++
 3 files changed, 689 insertions(+)
 create mode 100755 scripts/vnet_route_check.py
 create mode 100644 tests/vnet_route_check_test.py

diff --git a/scripts/vnet_route_check.py b/scripts/vnet_route_check.py
new file mode 100755
index 0000000000..010e953451
--- /dev/null
+++ b/scripts/vnet_route_check.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import json
+import syslog
+from swsscommon import swsscommon
+
+''' vnet_route_check.py: tool that verifies VNET route consistency between SONiC and vendor SDK DBs.
+
+Logically, VNET route verification consists of 3 parts:
+1. Get VNET route entries that are missed in ASIC_DB but present in APP_DB.
+2. Get VNET route entries that are missed in APP_DB but present in ASIC_DB.
+3. Get VNET route entries that are missed in SDK but present in ASIC_DB.
+
+Returns 0 if no inconsistency is found and all VNET routes are aligned in all DBs.
+Returns -1 if an inconsistency is found, and prints the differences between DBs in JSON format to standard output.
+
+Format of differences output:
+{
+    "results": {
+        "missed_in_asic_db_routes": {
+            "<vnet_name>": {
+                "routes": [
+                    "<pfx>/<pfx_len>"
+                ]
+            }
+        },
+        "missed_in_app_db_routes": {
+            "<vnet_name>": {
+                "routes": [
+                    "<pfx>/<pfx_len>"
+                ]
+            }
+        },
+        "missed_in_sdk_routes": {
+            "<vnet_name>": {
+                "routes": [
+                    "<pfx>/<pfx_len>"
+                ]
+            }
+        }
+    }
+}
+'''
+
+
+RC_OK = 0
+RC_ERR = -1
+
+
+report_level = syslog.LOG_ERR
+write_to_syslog = True
+
+
+def set_level(lvl, log_to_syslog):
+    global report_level
+    global write_to_syslog
+
+    write_to_syslog = log_to_syslog
+    report_level = lvl
+
+
+def print_message(lvl, *args):
+    if (lvl <= report_level):
+        msg = ""
+        for arg in args:
+            msg += " " + str(arg)
+        print(msg)
+        if write_to_syslog:
+            syslog.syslog(lvl, msg)
+
+
+def check_vnet_cfg():
+    ''' Returns True if VNET is configured in APP_DB or False if there is no VNET configuration.
+    '''
+    db = swsscommon.DBConnector('APPL_DB', 0)
+
+    vnet_db_keys = swsscommon.Table(db, 'VNET_TABLE').getKeys()
+
+    return True if vnet_db_keys else False
+
+
+def get_vnet_intfs():
+    ''' Returns dictionary of VNETs and related VNET interfaces.
+    Format: { <vnet_name>: [ <vnet_interface_name> ] }
+    '''
+    db = swsscommon.DBConnector('APPL_DB', 0)
+
+    intfs_table = swsscommon.Table(db, 'INTF_TABLE')
+    intfs_keys = swsscommon.Table(db, 'INTF_TABLE').getKeys()
+
+    vnet_intfs = {}
+
+    for intf_key in intfs_keys:
+        intf_attrs = intfs_table.get(intf_key)[1]
+
+        if 'vnet_name' in intf_attrs:
+            vnet_name = intf_attrs['vnet_name']
+            if vnet_name in vnet_intfs:
+                vnet_intfs[vnet_name].append(intf_key)
+            else:
+                vnet_intfs[vnet_name] = [intf_key]
+
+    return vnet_intfs
+
+
+def get_all_rifs_oids():
+    ''' Returns dictionary of all router interfaces and their OIDs.
+    Format: { <rif_name>: <rif_oid> }
+    '''
+    db = swsscommon.DBConnector('COUNTERS_DB', 0)
+
+    rif_table = swsscommon.Table(db, 'COUNTERS_RIF_NAME_MAP')
+    rif_keys = rif_table.getKeys()
+
+    rif_name_oid_map = {}
+
+    for rif_name in rif_keys:
+        rif_name_oid_map[rif_name] = rif_table.get(rif_name)[1]
+
+    return rif_name_oid_map
+
+
+def get_vnet_rifs_oids():
+    ''' Returns dictionary of VNET interfaces and their OIDs.
+    Format: { <vnet_rif_name>: <rif_oid> }
+    '''
+    vnet_intfs = get_vnet_intfs()
+    intfs_oids = get_all_rifs_oids()
+
+    vnet_intfs = [vnet_intfs[k] for k in vnet_intfs]
+    vnet_intfs = [val for sublist in vnet_intfs for val in sublist]
+
+    vnet_rifs_oids_map = {}
+
+    for intf_name in intfs_oids or {}:
+        if intf_name in vnet_intfs:
+            vnet_rifs_oids_map[intf_name] = intfs_oids[intf_name]
+
+    return vnet_rifs_oids_map
+
+
+def get_vrf_entries():
+    ''' Returns dictionary of VNET interfaces and corresponding VRF OIDs.
+    Format: { <vnet_rif_name>: <vrf_oid> }
+    '''
+    db = swsscommon.DBConnector('ASIC_DB', 0)
+    rif_table = swsscommon.Table(db, 'ASIC_STATE')
+
+    vnet_rifs_oids = get_vnet_rifs_oids()
+
+    rif_vrf_map = {}
+    for vnet_rif_name in vnet_rifs_oids:
+
+        db_keys = rif_table.getKeys()
+
+        for db_key in db_keys:
+            if 'SAI_OBJECT_TYPE_ROUTER_INTERFACE' in db_key:
+                rif_attrs = rif_table.get(db_key)[1]
+                rif_vrf_map[vnet_rif_name] = rif_attrs['SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID']
+
+    return rif_vrf_map
+
+
+def filter_out_vnet_ip2me_routes(vnet_routes):
+    ''' Filters out IP2ME routes from the provided dictionary of VNET routes.
+    Format: { <vnet_name>: { 'routes': [ <pfx>/<pfx_len> ], 'vrf_oid': <vrf_oid> } }
+    '''
+    db = swsscommon.DBConnector('APPL_DB', 0)
+
+    all_rifs_db_keys = swsscommon.Table(db, 'INTF_TABLE').getKeys()
+    vnet_intfs = get_vnet_intfs()
+
+    vnet_intfs = [vnet_intfs[k] for k in vnet_intfs]
+    vnet_intfs = [val for sublist in vnet_intfs for val in sublist]
+
+    vnet_ip2me_routes = []
+    for rif in all_rifs_db_keys:
+        rif_attrs = rif.split(':')
+        # Skip RIF entries without IP prefix and prefix length (they have only one attribute - RIF name)
+        if len(rif_attrs) == 1:
+            continue
+
+        # rif_attrs[0] - RIF name
+        # rif_attrs[1] - IP prefix and prefix length
+        # IP2ME routes have '/32' prefix length so replace it and add to the list
+        if rif_attrs[0] in vnet_intfs:
+            vnet_ip2me_routes.append(rif_attrs[1].replace('/24', '/32'))
+
+    for vnet, vnet_attrs in vnet_routes.items():
+        for route in vnet_attrs['routes']:
+            if route in vnet_ip2me_routes:
+                vnet_attrs['routes'].remove(route)
+
+        if not vnet_attrs['routes']:
+            vnet_routes.pop(vnet)
+
+
+def get_vnet_routes_from_app_db():
+    ''' Returns dictionary of VNET routes configured for each VNET in APP_DB.
+    Format: { <vnet_name>: { 'routes': [ <pfx>/<pfx_len> ], 'vrf_oid': <vrf_oid> } }
+    '''
+    db = swsscommon.DBConnector('APPL_DB', 0)
+
+    vnet_intfs = get_vnet_intfs()
+    vnet_vrfs = get_vrf_entries()
+
+    vnet_route_table = swsscommon.Table(db, 'VNET_ROUTE_TABLE')
+    vnet_route_tunnel_table = swsscommon.Table(db, 'VNET_ROUTE_TUNNEL_TABLE')
+
+    vnet_routes_db_keys = vnet_route_table.getKeys() + vnet_route_tunnel_table.getKeys()
+
+    vnet_routes = {}
+
+    for vnet_route_db_key in vnet_routes_db_keys:
+        vnet_route_list = vnet_route_db_key.split(':')
+        vnet_name = vnet_route_list[0]
+        vnet_route = vnet_route_list[1]
+
+        if vnet_name not in vnet_routes:
+            vnet_routes[vnet_name] = {}
+            vnet_routes[vnet_name]['routes'] = []
+
+            intf = vnet_intfs[vnet_name][0]
+            vnet_routes[vnet_name]['vrf_oid'] = vnet_vrfs.get(intf, 'None')
+
+        vnet_routes[vnet_name]['routes'].append(vnet_route)
+
+    return vnet_routes
+
+
+def get_vnet_routes_from_asic_db():
+    ''' Returns dictionary of VNET routes configured for each VNET in ASIC_DB.
+    Format: { <vnet_name>: { 'routes': [ <pfx>/<pfx_len> ], 'vrf_oid': <vrf_oid> } }
+    '''
+    db = swsscommon.DBConnector('ASIC_DB', 0)
+
+    tbl = swsscommon.Table(db, 'ASIC_STATE')
+
+    vnet_vrfs = get_vrf_entries()
+    vnet_vrfs_oids = [vnet_vrfs[k] for k in vnet_vrfs]
+
+    vnet_intfs = get_vnet_intfs()
+
+    vrf_oid_to_vnet_map = {}
+
+    for vnet_name, vnet_rifs in vnet_intfs.items():
+        for vnet_rif, vrf_oid in vnet_vrfs.items():
+            if vnet_rif in vnet_rifs:
+                vrf_oid_to_vnet_map[vrf_oid] = vnet_name
+
+    routes_db_keys = tbl.getKeys()
+
+    vnet_routes = {}
+
+    for route_db_key in routes_db_keys:
+        route_attrs = route_db_key.lower().split('\"', -1)
+
+        if 'sai_object_type_route_entry' not in route_attrs[0]:
+            continue
+
+        # route_attrs[11] - VRF OID for the VNET route
+        # route_attrs[3] - VNET route IP subnet
+        vrf_oid = route_attrs[11]
+        ip_addr = route_attrs[3]
+
+        if vrf_oid in vnet_vrfs_oids:
+            if vrf_oid_to_vnet_map[vrf_oid] not in vnet_routes:
+                vnet_name = vrf_oid_to_vnet_map[vrf_oid]
+
+                vnet_routes[vnet_name] = {}
+                vnet_routes[vnet_name]['routes'] = []
+                vnet_routes[vnet_name]['vrf_oid'] = vrf_oid
+
+            vnet_routes[vnet_name]['routes'].append(ip_addr)
+
+    filter_out_vnet_ip2me_routes(vnet_routes)
+
+    return vnet_routes
+
+
+def get_vnet_routes_diff(routes_1, routes_2):
+    ''' Returns all routes present in the routes_2 dictionary but missed in routes_1.
+    Format: { <vnet_name>: { 'routes': [ <pfx>/<pfx_len> ] } }
+    '''
+
+    routes = {}
+
+    for vnet_name, vnet_attrs in routes_2.items():
+        if vnet_name not in routes_1:
+            routes[vnet_name] = routes
+        else:
+            for vnet_route in vnet_attrs['routes']:
+                if vnet_route not in routes_1[vnet_name]['routes']:
+                    if vnet_name not in routes:
+                        routes[vnet_name] = {}
+                        routes[vnet_name]['routes'] = []
+                    routes[vnet_name]['routes'].append(vnet_route)
+
+    return routes
+
+
+def get_sdk_vnet_routes_diff(routes):
+    ''' Returns all routes present in the routes dictionary but missed in SAI/SDK.
+    Format: { <vnet_name>: { 'routes': [ <pfx>/<pfx_len> ], 'vrf_oid': <vrf_oid> } }
+    '''
+    routes_diff = {}
+
+    res = os.system('docker exec syncd test -f /usr/bin/vnet_route_check.py')
+    if res != 0:
+        return routes_diff
+
+    for vnet_name, vnet_routes in routes.items():
+        vnet_routes = routes[vnet_name]["routes"]
+        vnet_vrf_oid = routes[vnet_name]["vrf_oid"]
+
+        res = os.system('docker exec syncd "/usr/bin/vnet_route_check.py {} {}"'.format(vnet_vrf_oid, vnet_routes))
+        if res:
+            routes_diff[vnet_name] = {}
+            routes_diff[vnet_name]['routes'] = res
+
+    return routes_diff
+
+
+def main():
+
+    rc = RC_OK
+
+    # Don't run the VNET route consistency logic if there is no VNET configuration
+    if not check_vnet_cfg():
+        return rc
+
+    app_db_vnet_routes = get_vnet_routes_from_app_db()
+    asic_db_vnet_routes = get_vnet_routes_from_asic_db()
+
+    missed_in_asic_db_routes = get_vnet_routes_diff(asic_db_vnet_routes, app_db_vnet_routes)
+    missed_in_app_db_routes = get_vnet_routes_diff(app_db_vnet_routes, asic_db_vnet_routes)
+    missed_in_sdk_routes = get_sdk_vnet_routes_diff(asic_db_vnet_routes)
+
+    res = {}
+    res['results'] = {}
+    rc = RC_OK
+
+    if missed_in_asic_db_routes:
+        res['results']['missed_in_asic_db_routes'] = missed_in_asic_db_routes
+
+    if missed_in_app_db_routes:
+        res['results']['missed_in_app_db_routes'] = missed_in_app_db_routes
+
+    if missed_in_sdk_routes:
+        res['results']['missed_in_sdk_routes'] = missed_in_sdk_routes
+
+    if res['results']:
+        rc = RC_ERR
+        print_message(syslog.LOG_ERR, json.dumps(res, indent=4))
+        print_message(syslog.LOG_ERR, 'Vnet Route Mismatch reported')
+
+    return rc, res
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/setup.py b/setup.py
index
b8b6d07229..d857cd3723 100644 --- a/setup.py +++ b/setup.py @@ -117,6 +117,7 @@ 'scripts/reboot', 'scripts/route_check.py', 'scripts/route_check_test.sh', + 'scripts/vnet_route_check.py', 'scripts/sfpshow', 'scripts/storyteller', 'scripts/syseeprom-to-json', diff --git a/tests/vnet_route_check_test.py b/tests/vnet_route_check_test.py new file mode 100644 index 0000000000..09f35761a4 --- /dev/null +++ b/tests/vnet_route_check_test.py @@ -0,0 +1,325 @@ +import copy +import json +import os +import sys +from unittest.mock import MagicMock, patch + +import pytest + +sys.path.append("scripts") +import vnet_route_check + +DESCR = "Description" +ARGS = "args" +RET = "return" +APPL_DB = 0 +ASIC_DB = 1 +CNTR_DB = 2 +PRE = "pre-value" +UPD = "update" +RESULT = "res" + +OP_SET = "SET" +OP_DEL = "DEL" + +VXLAN_TUNNEL_TABLE = "VXLAN_TUNNEL_TABLE" +VNET_TABLE = "VNET_TABLE" +VNET_ROUTE_TABLE = "VNET_ROUTE_TABLE" +INTF_TABLE = "INTF_TABLE" +ASIC_STATE = "ASIC_STATE" + +RT_ENTRY_KEY_PREFIX = 'SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest":\"' +RT_ENTRY_KEY_SUFFIX = '\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000d4b\"}' + +current_test_name = None +current_test_no = None +current_test_data = None + +tables_returned = {} + +test_data = { + "0": { + DESCR: "All VNET routes are configured in both APP and ASIC DBs", + ARGS: "vnet_route_check", + PRE: { + APPL_DB: { + VXLAN_TUNNEL_TABLE: { + "tunnel_v4": { "src_ip": "10.1.0.32" } + }, + VNET_TABLE: { + "Vnet1": { "vxlan_tunnel": "tunnel_v4", "vni": "10001" } + }, + INTF_TABLE: { + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" } + } + }, + ASIC_DB: { + ASIC_STATE: { + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + "SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000d76": { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000d4b" + } + } + }, + CNTR_DB: { + "COUNTERS_RIF_NAME_MAP": { "Vlan3001": "oid:0x6000000000d76" } + } + }, + RESULT: { + "results": {} + } + }, + "1": { + DESCR: "VNET route is missed in ASIC DB", + ARGS: "vnet_route_check", + RET: -1, + PRE: { + APPL_DB: { + VXLAN_TUNNEL_TABLE: { + "tunnel_v4": { "src_ip": "10.1.0.32" } + }, + VNET_TABLE: { + "Vnet1": { "vxlan_tunnel": "tunnel_v4", "vni": "10001" } + }, + INTF_TABLE: { + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" } + } + }, + ASIC_DB: { + ASIC_STATE: { + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + "SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000d76": { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000d4b" + } + } + }, + CNTR_DB: { + "COUNTERS_RIF_NAME_MAP": { "Vlan3001": "oid:0x6000000000d76" } + } + }, + RESULT: { + "results": { + "missed_in_asic_db_routes": { + "Vnet1": { + "routes": [ + "50.2.2.0/24" + ] + } + } + } + } + }, + "2": { + DESCR: "VNET route is missed in APP DB", + ARGS: "vnet_route_check", + RET: -1, + PRE: { + APPL_DB: { + VXLAN_TUNNEL_TABLE: { + "tunnel_v4": { "src_ip": "10.1.0.32" } + }, + VNET_TABLE: { + "Vnet1": { 
"vxlan_tunnel": "tunnel_v4", "vni": "10001" } + }, + INTF_TABLE: { + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + } + }, + ASIC_DB: { + ASIC_STATE: { + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + "SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000d76": { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000d4b" + } + } + }, + CNTR_DB: { + "COUNTERS_RIF_NAME_MAP": { "Vlan3001": "oid:0x6000000000d76" } + } + }, + RESULT: { + "results": { + "missed_in_app_db_routes": { + "Vnet1": { + "routes": [ + "50.2.2.0/24" + ] + } + } + } + } + }, + "3": { + DESCR: "VNET routes are missed in both ASIC and APP DB", + ARGS: "vnet_route_check", + RET: -1, + PRE: { + APPL_DB: { + VXLAN_TUNNEL_TABLE: { + "tunnel_v4": { "src_ip": "10.1.0.32" } + }, + VNET_TABLE: { + "Vnet1": { "vxlan_tunnel": "tunnel_v4", "vni": "10001" } + }, + INTF_TABLE: { + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + } + }, + ASIC_DB: { + ASIC_STATE: { + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + "SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000d76": { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000d4b" + } + } + }, + CNTR_DB: { + "COUNTERS_RIF_NAME_MAP": { "Vlan3001": "oid:0x6000000000d76" } + } + }, + RESULT: { + "results": { + "missed_in_app_db_routes": { + "Vnet1": { + "routes": [ + "50.2.2.0/24" + ] + } + }, + "missed_in_asic_db_routes": { + "Vnet1": { + "routes": [ + "50.1.1.0/24" + ] + } + } + } + } + } +} + + +def do_start_test(tname, tno, ctdata): + global current_test_name, current_test_no, current_test_data + global tables_returned + + current_test_name = tname + current_test_no = tno + current_test_data = ctdata + tables_returned = {} + + print("Starting test case {} number={}".format(tname, tno)) + + +class Table: + def __init__(self, db, tbl): + self.db = db + self.tbl = tbl + self.data = copy.deepcopy(self.get_val(current_test_data[PRE], [db, tbl])) + + def get_val(self, d, keys): + for k in keys: + d = d[k] if k in d else {} + return d + + def getKeys(self): + return list(self.data.keys()) + + def get(self, key): + ret = copy.deepcopy(self.data.get(key, {})) + return (True, ret) + + +db_conns = {"APPL_DB": APPL_DB, "ASIC_DB": ASIC_DB, "COUNTERS_DB": CNTR_DB} +def conn_side_effect(arg, _): + return db_conns[arg] + + +def table_side_effect(db, tbl): + if not db in tables_returned: + tables_returned[db] = {} + if not tbl in tables_returned[db]: + tables_returned[db][tbl] = Table(db, tbl) + return tables_returned[db][tbl] + + +class mock_db_conn: + def __init__(self, db): + self.db_name = None + for (k, v) in db_conns.items(): + if v == db: + self.db_name = k + assert self.db_name != None + + def getDbName(self): + return self.db_name + + +def table_side_effect(db, tbl): + if not db in tables_returned: + tables_returned[db] = {} + if not tbl in tables_returned[db]: + tables_returned[db][tbl] = Table(db, tbl) + return tables_returned[db][tbl] + + +def set_mock(mock_table, mock_conn): + mock_conn.side_effect = conn_side_effect + mock_table.side_effect = 
table_side_effect + + +class TestVnetRouteCheck(object): + def setup(self): + pass + + def init(self): + vnet_route_check.UNIT_TESTING = 1 + + @patch("vnet_route_check.swsscommon.DBConnector") + @patch("vnet_route_check.swsscommon.Table") + def test_vnet_route_check(self, mock_table, mock_conn): + self.init() + ret = 0 + + set_mock(mock_table, mock_conn) + for (i, ct_data) in test_data.items(): + do_start_test("route_test", i, ct_data) + + with patch('sys.argv', ct_data[ARGS].split()): + ret, res = vnet_route_check.main() + expect_ret = ct_data[RET] if RET in ct_data else 0 + expect_res = ct_data[RESULT] if RESULT in ct_data else None + if res: + print("res={}".format(json.dumps(res, indent=4))) + if expect_res: + print("expect_res={}".format(json.dumps(expect_res, indent=4))) + assert ret == expect_ret + assert res == expect_res From 0f4988bc285ee14aa383e004a42d2043716713fa Mon Sep 17 00:00:00 2001 From: Andriy Yurkiv <70649192+ayurkiv-nvda@users.noreply.github.com> Date: Tue, 4 May 2021 10:35:32 +0300 Subject: [PATCH 5/9] Add pg-drop script to sonic filesystem (#1583) - What I did Added 'pg-drop' to the files system of SONiC - How I did it Add 'scripts/pg-drop' to setup.py file - How to verify it Check that 'pg-drop' script exists in /usr/local/bin/pg-drop Signed-off-by: Andriy Yurkiv --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index d857cd3723..77a771650f 100644 --- a/setup.py +++ b/setup.py @@ -108,6 +108,7 @@ 'scripts/nbrshow', 'scripts/neighbor_advertiser', 'scripts/pcmping', + 'scripts/pg-drop', 'scripts/port2alias', 'scripts/portconfig', 'scripts/portstat', From 9492eabcf09161a5f4410eb51eb740f70570c605 Mon Sep 17 00:00:00 2001 From: Andriy Yurkiv <70649192+ayurkiv-nvda@users.noreply.github.com> Date: Tue, 4 May 2021 19:34:27 +0300 Subject: [PATCH 6/9] Use swsscommon instead of swsssdk (#1510) #### What I did Changed code to use use swsscommon instead of swsssdk in counterpoll #### How I did it Removed an explicit function call from sdsssdk module #### How to verify it run counterpoll tests --- counterpoll/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/counterpoll/main.py b/counterpoll/main.py index ff9ca49dd4..cc0060d991 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -129,7 +129,7 @@ def disable(): @click.pass_context def pg_drop(ctx): """ Ingress PG drop counter commands """ - ctx.obj = swsssdk.ConfigDBConnector() + ctx.obj = ConfigDBConnector() ctx.obj.connect() @pg_drop.command() From be974bf33604356015c1fc39f563677256ff92ef Mon Sep 17 00:00:00 2001 From: Sumukha Tumkur Vani Date: Wed, 5 May 2021 09:16:22 -0700 Subject: [PATCH 7/9] [neighbor_advertiser] Use existing tunnel if present for creating tunnel mappings (#1589) --- scripts/neighbor_advertiser | 18 ++++++++++++++---- tests/neighbor_advertiser_test.py | 9 +++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/scripts/neighbor_advertiser b/scripts/neighbor_advertiser index dc38cf26c3..5cc09d32c9 100644 --- a/scripts/neighbor_advertiser +++ b/scripts/neighbor_advertiser @@ -169,9 +169,11 @@ def get_loopback_addr(ip_ver): def get_vlan_interfaces(): vlan_info = config_db.get_table('VLAN') vlan_interfaces = [] - + vlan_intfs = config_db.get_table('VLAN_INTERFACE') + # Skip L2 VLANs for vlan_name in vlan_info: - vlan_interfaces.append(vlan_name) + if vlan_name in vlan_intfs: + vlan_interfaces.append(vlan_name) return vlan_interfaces @@ -502,6 +504,14 @@ def reset_mirror_tunnel(): # Set vxlan tunnel # +def check_existing_tunnel(): 
+ vxlan_tunnel = config_db.get_table('VXLAN_TUNNEL') + if len(vxlan_tunnel): + global VXLAN_TUNNEL_NAME + VXLAN_TUNNEL_NAME = list(vxlan_tunnel.keys())[0] + return True + return False + def add_vxlan_tunnel(dst_ipv4_addr): vxlan_tunnel_info = { 'src_ip': get_loopback_addr(4), @@ -517,12 +527,12 @@ def add_vxlan_tunnel_map(): 'vni': get_vlan_interface_vxlan_id(vlan_intf_name), 'vlan': vlan_intf_name } - config_db.set_entry('VXLAN_TUNNEL_MAP', (VXLAN_TUNNEL_NAME, VXLAN_TUNNEL_MAP_PREFIX + str(index)), vxlan_tunnel_map_info) def set_vxlan_tunnel(ferret_server_ip): - add_vxlan_tunnel(ferret_server_ip) + if not check_existing_tunnel(): + add_vxlan_tunnel(ferret_server_ip) add_vxlan_tunnel_map() log.log_info('Finish setting vxlan tunnel; Ferret: {}'.format(ferret_server_ip)) diff --git a/tests/neighbor_advertiser_test.py b/tests/neighbor_advertiser_test.py index 4a7ab41863..3ad575c983 100644 --- a/tests/neighbor_advertiser_test.py +++ b/tests/neighbor_advertiser_test.py @@ -57,3 +57,12 @@ def test_neighbor_advertiser_slice(self, set_up): } ) assert output == expected_output + + def test_set_vxlan(self, set_up): + assert(neighbor_advertiser.check_existing_tunnel()) + neighbor_advertiser.add_vxlan_tunnel_map() + tunnel_mapping = neighbor_advertiser.config_db.get_table('VXLAN_TUNNEL_MAP') + expected_mapping = {("vtep1", "map_1"): {"vni": "1000", "vlan": "Vlan1000"}, ("vtep1", "map_2"): {"vni": "2000", "vlan": "Vlan2000"}} + for key in expected_mapping.keys(): + assert(key in tunnel_mapping.keys()) + assert(expected_mapping[key] == tunnel_mapping[key]) From fff40512b21c5e894856a3723e52b5f711cf8edd Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Wed, 5 May 2021 10:09:52 -0700 Subject: [PATCH 8/9] Fixing serial number read to get from DB if it is populated (#1580) #### What I did Modified show version command to pick serial number from STATE_DB if it was populated instead of getting it from EEPROM. #### How I did it Check state_db to see if serial number EEPROM section is populated. If yes use the data from DB. 
If not, read it from the decode-syseeprom --- show/main.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/show/main.py b/show/main.py index b0b2986a78..d0ca14650a 100755 --- a/show/main.py +++ b/show/main.py @@ -960,8 +960,15 @@ def version(verbose): asic_type = version_info['asic_type'] asic_count = multi_asic.get_num_asics() - serial_number_cmd = "sudo decode-syseeprom -s" - serial_number = subprocess.Popen(serial_number_cmd, shell=True, text=True, stdout=subprocess.PIPE) + serial_number = None + db = SonicV2Connector() + db.connect(db.STATE_DB) + eeprom_table = db.get_all(db.STATE_DB, 'EEPROM_INFO|0x23') + if "Name" in eeprom_table and eeprom_table["Name"] == "Serial Number" and "Value" in eeprom_table: + serial_number = eeprom_table["Value"] + else: + serial_number_cmd = "sudo decode-syseeprom -s" + serial_number = subprocess.Popen(serial_number_cmd, shell=True, text=True, stdout=subprocess.PIPE).stdout.read() sys_uptime_cmd = "uptime" sys_uptime = subprocess.Popen(sys_uptime_cmd, shell=True, text=True, stdout=subprocess.PIPE) @@ -976,7 +983,7 @@ def version(verbose): click.echo("HwSKU: {}".format(hwsku)) click.echo("ASIC: {}".format(asic_type)) click.echo("ASIC Count: {}".format(asic_count)) - click.echo("Serial Number: {}".format(serial_number.stdout.read().strip())) + click.echo("Serial Number: {}".format(serial_number.strip())) click.echo("Uptime: {}".format(sys_uptime.stdout.read().strip())) click.echo("\nDocker images:") cmd = 'sudo docker images --format "table {{.Repository}}\\t{{.Tag}}\\t{{.ID}}\\t{{.Size}}"' From 615e5312a2fa89e9f4736832338ce7a9256e7004 Mon Sep 17 00:00:00 2001 From: Travis Van Duyn Date: Wed, 5 May 2021 12:22:40 -0700 Subject: [PATCH 9/9] [show][config] Add new snmp commands (#1347) Added new SNMP show and config commands using ConfigDB as well as unittests. show commands: show runningconfiguration snmp show runningconfiguration snmp contact [--json] show runningconfiguration snmp location [--json] show runningconfiguration snmp community [--json] show runningconfiguration snmp user [--json] config commands: sudo config snmp community add/del/replace sudo config snmp contact add/del/modify sudo config snmp location add/del/modify sudo config snmp user add/del --- config/main.py | 540 +++++++++++++++++++ show/main.py | 179 ++++++- tests/config_snmp_test.py | 872 +++++++++++++++++++++++++++++++ tests/mock_tables/config_db.json | 166 ++++++ tests/show_snmp_test.py | 467 +++++++++++++++++ 5 files changed, 2210 insertions(+), 14 deletions(-) create mode 100644 tests/config_snmp_test.py create mode 100644 tests/show_snmp_test.py diff --git a/config/main.py b/config/main.py index e9bab3172d..953af72e79 100644 --- a/config/main.py +++ b/config/main.py @@ -2191,6 +2191,546 @@ def delete_snmptrap_server(ctx, ver): cmd="systemctl restart snmp" os.system (cmd) + + +# +# 'snmp' group ('config snmp ...') +# +@config.group(cls=clicommon.AbbreviationGroup, name='snmp') +@clicommon.pass_db +def snmp(db): + """SNMP configuration tasks""" + + +@snmp.group(cls=clicommon.AbbreviationGroup) +@clicommon.pass_db +def community(db): + pass + + +def is_valid_community_type(commstr_type): + commstr_types = ['RO', 'RW'] + if commstr_type not in commstr_types: + click.echo("Invalid community type. 
Must be either RO or RW") + return False + return True + + +def is_valid_user_type(user_type): + convert_user_type = {'noauthnopriv': 'noAuthNoPriv', 'authnopriv': 'AuthNoPriv', 'priv': 'Priv'} + if user_type not in convert_user_type: + message = ("Invalid user type. Must be one of these one of these three " + "'noauthnopriv' or 'authnopriv' or 'priv'") + click.echo(message) + return False, message + return True, convert_user_type[user_type] + + +def is_valid_auth_type(user_auth_type): + user_auth_types = ['MD5', 'SHA', 'HMAC-SHA-2'] + if user_auth_type not in user_auth_types: + click.echo("Invalid user authentication type. Must be one of these 'MD5', 'SHA', or 'HMAC-SHA-2'") + return False + return True + + +def is_valid_encrypt_type(encrypt_type): + encrypt_types = ['DES', 'AES'] + if encrypt_type not in encrypt_types: + click.echo("Invalid user encryption type. Must be one of these two 'DES' or 'AES'") + return False + return True + + +def snmp_community_secret_check(snmp_secret): + excluded_special_symbols = ['@', ":"] + if len(snmp_secret) > 32: + click.echo("SNMP community string length should be not be greater than 32") + click.echo("SNMP community string should not have any of these special " + "symbols {}".format(excluded_special_symbols)) + click.echo("FAILED: SNMP community string length should be not be greater than 32") + return False + if any(char in excluded_special_symbols for char in snmp_secret): + click.echo("SNMP community string length should be not be greater than 32") + click.echo("SNMP community string should not have any of these special " + "symbols {}".format(excluded_special_symbols)) + click.echo("FAILED: SNMP community string should not have any of these " + "special symbols {}".format(excluded_special_symbols)) + return False + return True + + +def snmp_username_check(snmp_username): + excluded_special_symbols = ['@', ":"] + if len(snmp_username) > 32: + click.echo("SNMP user {} length should be not be greater than 32 characters".format(snmp_username)) + click.echo("SNMP community string should not have any of these special " + "symbols {}".format(excluded_special_symbols)) + click.echo("FAILED: SNMP user {} length should not be greater than 32 characters".format(snmp_username)) + return False + if any(char in excluded_special_symbols for char in snmp_username): + click.echo("SNMP user {} length should be not be greater than 32 characters".format(snmp_username)) + click.echo("SNMP community string should not have any of these special " + "symbols {}".format(excluded_special_symbols)) + click.echo("FAILED: SNMP user {} should not have any of these special " + "symbols {}".format(snmp_username, excluded_special_symbols)) + return False + return True + + +def snmp_user_secret_check(snmp_secret): + excluded_special_symbols = ['@', ":"] + if len(snmp_secret) < 8: + click.echo("SNMP user password length should be at least 8 characters") + click.echo("SNMP user password length should be not be greater than 64") + click.echo("SNMP user password should not have any of these special " + "symbols {}".format(excluded_special_symbols)) + click.echo("FAILED: SNMP user password length should be at least 8 characters") + return False + if len(snmp_secret) > 64: + click.echo("SNMP user password length should be at least 8 characters") + click.echo("SNMP user password length should be not be greater than 64") + click.echo("SNMP user password should not have any of these special " + "symbols {}".format(excluded_special_symbols)) + click.echo("FAILED: SNMP user password length 
should be not be greater than 64") + return False + if any(char in excluded_special_symbols for char in snmp_secret): + click.echo("SNMP user password length should be at least 8 characters") + click.echo("SNMP user password length should be not be greater than 64") + click.echo("SNMP user password should not have any of these special " + "symbols {}".format(excluded_special_symbols)) + click.echo("FAILED: SNMP user password should not have any of these special " + "symbols {}".format(excluded_special_symbols)) + return False + return True + + +@community.command('add') +@click.argument('community', metavar='', required=True) +@click.argument('string_type', metavar='', required=True) +@clicommon.pass_db +def add_community(db, community, string_type): + """ Add snmp community string""" + string_type = string_type.upper() + if not is_valid_community_type(string_type): + sys.exit(1) + if not snmp_community_secret_check(community): + sys.exit(2) + snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY") + if community in snmp_communities: + click.echo("SNMP community {} is already configured".format(community)) + sys.exit(3) + db.cfgdb.set_entry('SNMP_COMMUNITY', community, {'TYPE': string_type}) + click.echo("SNMP community {} added to configuration".format(community)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + + +@community.command('del') +@click.argument('community', metavar='', required=True) +@clicommon.pass_db +def del_community(db, community): + """ Delete snmp community string""" + snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY") + if community not in snmp_communities: + click.echo("SNMP community {} is not configured".format(community)) + sys.exit(1) + else: + db.cfgdb.set_entry('SNMP_COMMUNITY', community, None) + click.echo("SNMP community {} removed from configuration".format(community)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + + +@community.command('replace') +@click.argument('current_community', metavar='', required=True) +@click.argument('new_community', metavar='', required=True) +@clicommon.pass_db +def replace_community(db, current_community, new_community): + """ Replace snmp community string""" + snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY") + if not current_community in snmp_communities: + click.echo("Current SNMP community {} is not configured".format(current_community)) + sys.exit(1) + if not snmp_community_secret_check(new_community): + sys.exit(2) + elif new_community in snmp_communities: + click.echo("New SNMP community {} to replace current SNMP community {} already " + "configured".format(new_community, current_community)) + sys.exit(3) + else: + string_type = snmp_communities[current_community]['TYPE'] + db.cfgdb.set_entry('SNMP_COMMUNITY', new_community, {'TYPE': string_type}) + click.echo("SNMP community {} added to configuration".format(new_community)) + db.cfgdb.set_entry('SNMP_COMMUNITY', current_community, None) + click.echo('SNMP community {} replace community 
{}'.format(new_community, current_community)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + + +@snmp.group(cls=clicommon.AbbreviationGroup) +@clicommon.pass_db +def contact(db): + pass + + +def is_valid_email(email): + return bool(re.search(r"^[\w\.\+\-]+\@[\w]+\.[a-z]{2,3}$", email)) + + +@contact.command('add') +@click.argument('contact', metavar='', required=True) +@click.argument('contact_email', metavar='', required=True) +@clicommon.pass_db +def add_contact(db, contact, contact_email): + """ Add snmp contact name and email """ + snmp = db.cfgdb.get_table("SNMP") + try: + if snmp['CONTACT']: + click.echo("Contact already exists. Use sudo config snmp contact modify instead") + sys.exit(1) + else: + db.cfgdb.set_entry('SNMP', 'CONTACT', {contact: contact_email}) + click.echo("Contact name {} and contact email {} have been added to " + "configuration".format(contact, contact_email)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + except KeyError: + if "CONTACT" not in snmp.keys(): + if not is_valid_email(contact_email): + click.echo("Contact email {} is not valid".format(contact_email)) + sys.exit(2) + db.cfgdb.set_entry('SNMP', 'CONTACT', {contact: contact_email}) + click.echo("Contact name {} and contact email {} have been added to " + "configuration".format(contact, contact_email)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + + +@contact.command('del') +@click.argument('contact', metavar='', required=True) +@clicommon.pass_db +def del_contact(db, contact): + """ Delete snmp contact name and email """ + snmp = db.cfgdb.get_table("SNMP") + try: + if not contact in (list(snmp['CONTACT'].keys()))[0]: + click.echo("SNMP contact {} is not configured".format(contact)) + sys.exit(1) + else: + db.cfgdb.set_entry('SNMP', 'CONTACT', None) + click.echo("SNMP contact {} removed from configuration".format(contact)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + except KeyError: + if "CONTACT" not in snmp.keys(): + click.echo("Contact name {} is not configured".format(contact)) + sys.exit(2) + + +@contact.command('modify') +@click.argument('contact', metavar='', required=True) +@click.argument('contact_email', metavar='', required=True) +@clicommon.pass_db +def modify_contact(db, contact, contact_email): + """ Modify snmp contact""" + snmp = db.cfgdb.get_table("SNMP") + try: + current_snmp_contact_name = (list(snmp['CONTACT'].keys()))[0] + if current_snmp_contact_name == contact: + 
+
+
+@contact.command('modify')
+@click.argument('contact', metavar='', required=True)
+@click.argument('contact_email', metavar='', required=True)
+@clicommon.pass_db
+def modify_contact(db, contact, contact_email):
+    """ Modify snmp contact"""
+    snmp = db.cfgdb.get_table("SNMP")
+    try:
+        current_snmp_contact_name = (list(snmp['CONTACT'].keys()))[0]
+        if current_snmp_contact_name == contact:
+            current_snmp_contact_email = snmp['CONTACT'][contact]
+        else:
+            current_snmp_contact_email = ''
+        if contact == current_snmp_contact_name and contact_email == current_snmp_contact_email:
+            click.echo("SNMP contact {} {} already exists".format(contact, contact_email))
+            sys.exit(1)
+        elif contact == current_snmp_contact_name and contact_email != current_snmp_contact_email:
+            if not is_valid_email(contact_email):
+                click.echo("Contact email {} is not valid".format(contact_email))
+                sys.exit(2)
+            db.cfgdb.mod_entry('SNMP', 'CONTACT', {contact: contact_email})
+            click.echo("SNMP contact {} email updated to {}".format(contact, contact_email))
+            try:
+                click.echo("Restarting SNMP service...")
+                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+            except SystemExit as e:
+                click.echo("Restart service snmp failed with error {}".format(e))
+                raise click.Abort()
+        else:
+            if not is_valid_email(contact_email):
+                click.echo("Contact email {} is not valid".format(contact_email))
+                sys.exit(2)
+            db.cfgdb.set_entry('SNMP', 'CONTACT', None)
+            db.cfgdb.set_entry('SNMP', 'CONTACT', {contact: contact_email})
+            click.echo("SNMP contact {} and contact email {} updated".format(contact, contact_email))
+            try:
+                click.echo("Restarting SNMP service...")
+                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+            except SystemExit as e:
+                click.echo("Restart service snmp failed with error {}".format(e))
+                raise click.Abort()
+    except KeyError:
+        if "CONTACT" not in snmp.keys():
+            click.echo("Contact name {} is not configured".format(contact))
+            sys.exit(3)
+
+
+@snmp.group(cls=clicommon.AbbreviationGroup)
+@clicommon.pass_db
+def location(db):
+    pass
+
+
+@location.command('add')
+@click.argument('location', metavar='', required=True, nargs=-1)
+@clicommon.pass_db
+def add_location(db, location):
+    """ Add snmp location"""
+    if isinstance(location, tuple):
+        location = " ".join(location)
+    elif isinstance(location, list):
+        location = " ".join(location)
+    snmp = db.cfgdb.get_table("SNMP")
+    try:
+        if snmp['LOCATION']:
+            click.echo("Location already exists")
+            sys.exit(1)
+    except KeyError:
+        if "LOCATION" not in snmp.keys():
+            db.cfgdb.set_entry('SNMP', 'LOCATION', {'Location': location})
+            click.echo("SNMP Location {} has been added to configuration".format(location))
+            try:
+                click.echo("Restarting SNMP service...")
+                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+            except SystemExit as e:
+                click.echo("Restart service snmp failed with error {}".format(e))
+                raise click.Abort()
+
+
+@location.command('del')
+@click.argument('location', metavar='', required=True, nargs=-1)
+@clicommon.pass_db
+def delete_location(db, location):
+    """ Delete snmp location"""
+    if isinstance(location, tuple):
+        location = " ".join(location)
+    elif isinstance(location, list):
+        location = " ".join(location)
+    snmp = db.cfgdb.get_table("SNMP")
+    try:
+        if location == snmp['LOCATION']['Location']:
+            db.cfgdb.set_entry('SNMP', 'LOCATION', None)
+            click.echo("SNMP Location {} removed from configuration".format(location))
+            try:
+                click.echo("Restarting SNMP service...")
+                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
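+                # "reset-failed" first clears any previous failed state and
+                # start-rate counters for snmp.service, so the restart below is
+                # not rejected by systemd's start burst limiting.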
+ except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + else: + click.echo("SNMP Location {} does not exist. The location is {}".format(location, snmp['LOCATION']['Location'])) + sys.exit(1) + except KeyError: + if "LOCATION" not in snmp.keys(): + click.echo("SNMP Location {} is not configured".format(location)) + sys.exit(2) + + +@location.command('modify') +@click.argument('location', metavar='', required=True, nargs=-1) +@clicommon.pass_db +def modify_location(db, location): + """ Modify snmp location""" + if isinstance(location, tuple): + location = " ".join(location) + elif isinstance(location, list): + location = " ".join(location) + snmp = db.cfgdb.get_table("SNMP") + try: + snmp_location = snmp['LOCATION']['Location'] + if location in snmp_location: + click.echo("SNMP location {} already exists".format(location)) + sys.exit(1) + else: + db.cfgdb.mod_entry('SNMP', 'LOCATION', {'Location': location}) + click.echo("SNMP location {} modified in configuration".format(location)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + except KeyError: + click.echo("Cannot modify SNMP Location. You must use 'config snmp location add command '") + sys.exit(2) + + +from enum import IntEnum +class SnmpUserError(IntEnum): + NameCheckFailure = 1 + TypeNoAuthNoPrivOrAuthNoPrivOrPrivCheckFailure = 2 + RoRwCheckFailure = 3 + NoAuthNoPrivHasAuthType = 4 + AuthTypeMd5OrShaOrHmacsha2IsMissing = 5 + AuthTypeMd5OrShaOrHmacsha2Failure = 6 + AuthPasswordMissing = 7 + AuthPasswordFailsComplexityRequirements = 8 + EncryptPasswordNotAllowedWithAuthNoPriv = 9 + EncryptTypeDesOrAesIsMissing = 10 + EncryptTypeFailsComplexityRequirements = 11 + EncryptPasswordMissingFailure = 12 + EncryptPasswordFailsComplexityRequirements = 13 + UserAlreadyConfigured = 14 + + +@snmp.group(cls=clicommon.AbbreviationGroup) +@clicommon.pass_db +def user(db): + pass + + +@user.command('add') +@click.argument('user', metavar='', required=True) +@click.argument('user_type', metavar='', required=True) +@click.argument('user_permission_type', metavar='', required=True) +@click.argument('user_auth_type', metavar='', required=False) +@click.argument('user_auth_password', metavar='', required=False) +@click.argument('user_encrypt_type', metavar='', required=False) +@click.argument('user_encrypt_password', metavar='', required=False) +@clicommon.pass_db +def add_user(db, user, user_type, user_permission_type, user_auth_type, user_auth_password, user_encrypt_type, + user_encrypt_password): + """ Add snmp user""" + if not snmp_username_check(user): + sys.exit(SnmpUserError.NameCheckFailure) + user_type = user_type.lower() + user_type_info = is_valid_user_type(user_type) + if not user_type_info[0]: + sys.exit(SnmpUserError.TypeNoAuthNoPrivOrAuthNoPrivOrPrivCheckFailure) + user_type = user_type_info[1] + user_permission_type = user_permission_type.upper() + if not is_valid_community_type(user_permission_type): + sys.exit(SnmpUserError.RoRwCheckFailure) + if user_type == "noAuthNoPriv": + if user_auth_type: + click.echo("User auth type not used with 'noAuthNoPriv'. 
Please use 'AuthNoPriv' or 'Priv' instead") + sys.exit(SnmpUserError.NoAuthNoPrivHasAuthType) + else: + if not user_auth_type: + click.echo("User auth type is missing. Must be MD5, SHA, or HMAC-SHA-2") + sys.exit(SnmpUserError.AuthTypeMd5OrShaOrHmacsha2IsMissing) + if user_auth_type: + user_auth_type = user_auth_type.upper() + if not is_valid_auth_type(user_auth_type): + sys.exit(SnmpUserError.AuthTypeMd5OrShaOrHmacsha2Failure) + elif not user_auth_password: + click.echo("User auth password is missing") + sys.exit(SnmpUserError.AuthPasswordMissing) + elif user_auth_password: + if not snmp_user_secret_check(user_auth_password): + sys.exit(SnmpUserError.AuthPasswordFailsComplexityRequirements) + if user_type == "AuthNoPriv": + if user_encrypt_type: + click.echo("User encrypt type not used with 'AuthNoPriv'. Please use 'Priv' instead") + sys.exit(SnmpUserError.EncryptPasswordNotAllowedWithAuthNoPriv) + elif user_type == "Priv": + if not user_encrypt_type: + click.echo("User encrypt type is missing. Must be DES or AES") + sys.exit(SnmpUserError.EncryptTypeDesOrAesIsMissing) + if user_encrypt_type: + user_encrypt_type = user_encrypt_type.upper() + if not is_valid_encrypt_type(user_encrypt_type): + sys.exit(SnmpUserError.EncryptTypeFailsComplexityRequirements) + elif not user_encrypt_password: + click.echo("User encrypt password is missing") + sys.exit(SnmpUserError.EncryptPasswordMissingFailure) + elif user_encrypt_password: + if not snmp_user_secret_check(user_encrypt_password): + sys.exit(SnmpUserError.EncryptPasswordFailsComplexityRequirements) + snmp_users = db.cfgdb.get_table("SNMP_USER") + if user in snmp_users.keys(): + click.echo("SNMP user {} is already configured".format(user)) + sys.exit(SnmpUserError.UserAlreadyConfigured) + else: + if not user_auth_type: + user_auth_type = '' + if not user_auth_password: + user_auth_password = '' + if not user_encrypt_type: + user_encrypt_type = '' + if not user_encrypt_password: + user_encrypt_password = '' + db.cfgdb.set_entry('SNMP_USER', user, {'SNMP_USER_TYPE': user_type, + 'SNMP_USER_PERMISSION': user_permission_type, + 'SNMP_USER_AUTH_TYPE': user_auth_type, + 'SNMP_USER_AUTH_PASSWORD': user_auth_password, + 'SNMP_USER_ENCRYPTION_TYPE': user_encrypt_type, + 'SNMP_USER_ENCRYPTION_PASSWORD': user_encrypt_password}) + click.echo("SNMP user {} added to configuration".format(user)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + + +@user.command('del') +@click.argument('user', metavar='', required=True) +@clicommon.pass_db +def del_user(db, user): + """ Del snmp user""" + snmp_users = db.cfgdb.get_table("SNMP_USER") + if user not in snmp_users: + click.echo("SNMP user {} is not configured".format(user)) + sys.exit(1) + else: + db.cfgdb.set_entry('SNMP_USER', user, None) + click.echo("SNMP user {} removed from configuration".format(user)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + # # 'bgp' group ('config bgp ...') # diff --git a/show/main.py b/show/main.py index d0ca14650a..1cea9e6534 100755 --- 
a/show/main.py +++ b/show/main.py @@ -377,6 +377,7 @@ def snmptrap (ctx): body.append([ver, traptable[row]['DestIp'], traptable[row]['DestPort'], traptable[row]['vrf'], traptable[row]['Community']]) click.echo(tabulate(body, header)) + # # 'subinterfaces' group ("show subinterfaces ...") # @@ -1109,20 +1110,6 @@ def interfaces(interfacename, verbose): run_command(cmd, display_cmd=verbose) -# 'snmp' subcommand ("show runningconfiguration snmp") -@runningconfiguration.command() -@click.argument('server', required=False) -@click.option('--verbose', is_flag=True, help="Enable verbose output") -def snmp(server, verbose): - """Show SNMP information""" - cmd = "sudo docker exec snmp cat /etc/snmp/snmpd.conf" - - if server is not None: - cmd += " | grep -i agentAddress" - - run_command(cmd, display_cmd=verbose) - - # 'ntp' subcommand ("show runningconfiguration ntp") @runningconfiguration.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") @@ -1140,6 +1127,170 @@ def ntp(verbose): print(tabulate(ntp_dict, headers=list(ntp_dict.keys()), tablefmt="simple", stralign='left', missingval="")) + +# 'snmp' subcommand ("show runningconfiguration snmp") +@runningconfiguration.group("snmp", invoke_without_command=True) +@clicommon.pass_db +@click.pass_context +def snmp(ctx, db): + """Show SNMP running configuration""" + if ctx.invoked_subcommand is None: + show_run_snmp(db.cfgdb) + + +# ("show runningconfiguration snmp community") +@snmp.command('community') +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, + help="Display the output in JSON format") +@clicommon.pass_db +def community(db, json_output): + """show SNMP running configuration community""" + snmp_comm_header = ["Community String", "Community Type"] + snmp_comm_body = [] + snmp_comm_keys = db.cfgdb.get_table('SNMP_COMMUNITY') + snmp_comm_strings = snmp_comm_keys.keys() + if json_output: + click.echo(snmp_comm_keys) + else: + for line in snmp_comm_strings: + comm_string = line + comm_string_type = snmp_comm_keys[line]['TYPE'] + snmp_comm_body.append([comm_string, comm_string_type]) + click.echo(tabulate(natsorted(snmp_comm_body), snmp_comm_header)) + + +# ("show runningconfiguration snmp contact") +@snmp.command('contact') +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, + help="Display the output in JSON format") +@clicommon.pass_db +def contact(db, json_output): + """show SNMP running configuration contact""" + snmp = db.cfgdb.get_table('SNMP') + snmp_header = ["Contact", "Contact Email"] + snmp_body = [] + if json_output: + try: + if snmp['CONTACT']: + click.echo(snmp['CONTACT']) + except KeyError: + snmp['CONTACT'] = {} + click.echo(snmp['CONTACT']) + else: + try: + if snmp['CONTACT']: + snmp_contact = list(snmp['CONTACT'].keys()) + snmp_contact_email = [snmp['CONTACT'][snmp_contact[0]]] + snmp_body.append([snmp_contact[0], snmp_contact_email[0]]) + except KeyError: + snmp['CONTACT'] = '' + click.echo(tabulate(snmp_body, snmp_header)) + + +# ("show runningconfiguration snmp location") +@snmp.command('location') +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, + help="Display the output in JSON format") +@clicommon.pass_db +def location(db, json_output): + """show SNMP running configuration location""" + snmp = db.cfgdb.get_table('SNMP') + snmp_header = ["Location"] + snmp_body = [] + if json_output: + try: + if snmp['LOCATION']: + click.echo(snmp['LOCATION']) + except KeyError: + snmp['LOCATION'] = {} + 
click.echo(snmp['LOCATION']) + else: + try: + if snmp['LOCATION']: + snmp_location = [snmp['LOCATION']['Location']] + snmp_body.append(snmp_location) + except KeyError: + snmp['LOCATION'] = '' + click.echo(tabulate(snmp_body, snmp_header)) + + +# ("show runningconfiguration snmp user") +@snmp.command('user') +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, + help="Display the output in JSON format") +@clicommon.pass_db +def users(db, json_output): + """show SNMP running configuration user""" + snmp_users = db.cfgdb.get_table('SNMP_USER') + snmp_user_header = ['User', "Permission Type", "Type", "Auth Type", "Auth Password", "Encryption Type", + "Encryption Password"] + snmp_user_body = [] + if json_output: + click.echo(snmp_users) + else: + for snmp_user, snmp_user_value in snmp_users.items(): + snmp_user_permissions_type = snmp_users[snmp_user].get('SNMP_USER_PERMISSION', 'Null') + snmp_user_auth_type = snmp_users[snmp_user].get('SNMP_USER_AUTH_TYPE', 'Null') + snmp_user_auth_password = snmp_users[snmp_user].get('SNMP_USER_AUTH_PASSWORD', 'Null') + snmp_user_encryption_type = snmp_users[snmp_user].get('SNMP_USER_ENCRYPTION_TYPE', 'Null') + snmp_user_encryption_password = snmp_users[snmp_user].get('SNMP_USER_ENCRYPTION_PASSWORD', 'Null') + snmp_user_type = snmp_users[snmp_user].get('SNMP_USER_TYPE', 'Null') + snmp_user_body.append([snmp_user, snmp_user_permissions_type, snmp_user_type, snmp_user_auth_type, + snmp_user_auth_password, snmp_user_encryption_type, snmp_user_encryption_password]) + click.echo(tabulate(natsorted(snmp_user_body), snmp_user_header)) + + +# ("show runningconfiguration snmp") +@clicommon.pass_db +def show_run_snmp(db, ctx): + snmp_contact_location_table = db.cfgdb.get_table('SNMP') + snmp_comm_table = db.cfgdb.get_table('SNMP_COMMUNITY') + snmp_users = db.cfgdb.get_table('SNMP_USER') + snmp_location_header = ["Location"] + snmp_location_body = [] + snmp_contact_header = ["SNMP_CONTACT", "SNMP_CONTACT_EMAIL"] + snmp_contact_body = [] + snmp_comm_header = ["Community String", "Community Type"] + snmp_comm_body = [] + snmp_user_header = ['User', "Permission Type", "Type", "Auth Type", "Auth Password", "Encryption Type", + "Encryption Password"] + snmp_user_body = [] + try: + if snmp_contact_location_table['LOCATION']: + snmp_location = [snmp_contact_location_table['LOCATION']['Location']] + snmp_location_body.append(snmp_location) + except KeyError: + snmp_contact_location_table['LOCATION'] = '' + click.echo(tabulate(snmp_location_body, snmp_location_header)) + click.echo("\n") + try: + if snmp_contact_location_table['CONTACT']: + snmp_contact = list(snmp_contact_location_table['CONTACT'].keys()) + snmp_contact_email = [snmp_contact_location_table['CONTACT'][snmp_contact[0]]] + snmp_contact_body.append([snmp_contact[0], snmp_contact_email[0]]) + except KeyError: + snmp_contact_location_table['CONTACT'] = '' + click.echo(tabulate(snmp_contact_body, snmp_contact_header)) + click.echo("\n") + snmp_comm_strings = snmp_comm_table.keys() + for line in snmp_comm_strings: + comm_string = line + comm_string_type = snmp_comm_table[line]['TYPE'] + snmp_comm_body.append([comm_string, comm_string_type]) + click.echo(tabulate(natsorted(snmp_comm_body), snmp_comm_header)) + click.echo("\n") + for snmp_user, snmp_user_value in snmp_users.items(): + snmp_user_permissions_type = snmp_users[snmp_user].get('SNMP_USER_PERMISSION', 'Null') + snmp_user_auth_type = snmp_users[snmp_user].get('SNMP_USER_AUTH_TYPE', 'Null') + snmp_user_auth_password = 
snmp_users[snmp_user].get('SNMP_USER_AUTH_PASSWORD', 'Null') + snmp_user_encryption_type = snmp_users[snmp_user].get('SNMP_USER_ENCRYPTION_TYPE', 'Null') + snmp_user_encryption_password = snmp_users[snmp_user].get('SNMP_USER_ENCRYPTION_PASSWORD', 'Null') + snmp_user_type = snmp_users[snmp_user].get('SNMP_USER_TYPE', 'Null') + snmp_user_body.append([snmp_user, snmp_user_permissions_type, snmp_user_type, snmp_user_auth_type, + snmp_user_auth_password, snmp_user_encryption_type, snmp_user_encryption_password]) + click.echo(tabulate(natsorted(snmp_user_body), snmp_user_header)) + + # 'syslog' subcommand ("show runningconfiguration syslog") @runningconfiguration.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py new file mode 100644 index 0000000000..1be2704e47 --- /dev/null +++ b/tests/config_snmp_test.py @@ -0,0 +1,872 @@ +import sys +import os +import click +from click.testing import CliRunner + +import show.main as show +import clear.main as clear +import config.main as config + +import pytest + +from unittest import mock +from unittest.mock import patch +from utilities_common.db import Db + +tabular_data_show_run_snmp_contact_expected = """\ +Contact Contact Email\n--------- --------------------\ntestuser testuser@contoso.com +""" + +json_data_show_run_snmp_contact_expected = """\ +{'testuser': 'testuser@contoso.com'} +""" + +config_snmp_contact_add_del_new_contact ="""\ +Contact name testuser and contact email testuser@contoso.com have been added to configuration +Restarting SNMP service... +""" + +config_snmp_location_add_new_location ="""\ +SNMP Location public has been added to configuration +Restarting SNMP service... +""" + + +expected_snmp_community_add_new_community_ro_output = {"TYPE": "RO"} +expected_snmp_community_add_new_community_rw_output = {"TYPE": "RW"} +expected_snmp_community_replace_existing_community_with_new_community_output = {'TYPE': 'RW'} + +expected_snmp_user_priv_ro_md5_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'MD5', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_md5_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'MD5', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_sha_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'SHA', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_sha_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'SHA', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_hmac_sha_2_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_hmac_sha_2_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 
'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_md5_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'MD5', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_md5_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'MD5', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_sha_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'SHA', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_sha_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'SHA', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_hmac_sha_2_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_hmac_sha_2_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} + +class TestSNMPConfigCommands(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + # Add snmp community tests + def test_config_snmp_community_add_new_community_ro(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], + ["Everest", "ro"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP community Everest added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_COMMUNITY", "Everest") == expected_snmp_community_add_new_community_ro_output + + def test_config_snmp_community_add_new_community_rw(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], + ["Shasta", "rw"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP community Shasta added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_COMMUNITY", "Shasta") == expected_snmp_community_add_new_community_rw_output + + def test_config_snmp_community_add_new_community_with_invalid_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], ["Everest", "RT"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Invalid community type. 
Must be either RO or RW' in result.output + + def test_config_snmp_community_add_invalid_community_over_32_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], + ["over_32_character_community_string", "ro"]) + print(result.exit_code) + assert result.exit_code == 2 + assert 'FAILED: SNMP community string length should be not be greater than 32' in result.output + + def test_config_snmp_community_add_invalid_community_with_excluded_special_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], + ["Test@snmp", "ro"]) + print(result.exit_code) + assert result.exit_code == 2 + assert 'FAILED: SNMP community string should not have any of these special symbols' in result.output + + def test_config_snmp_community_add_existing_community(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], ["Rainer", "rw"]) + print(result.exit_code) + assert result.exit_code == 3 + assert 'SNMP community Rainer is already configured' in result.output + + # Del snmp community tests + def test_config_snmp_community_del_existing_community(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["del"], + ["Rainer"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP community Rainer removed from configuration' in result.output + assert db.cfgdb.get_entry("SNMP_COMMUNITY", "Everest") == {} + + def test_config_snmp_community_del_non_existing_community(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["del"], ["Everest"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP community Everest is not configured' in result.output + + # Replace snmp community tests + def test_config_snmp_community_replace_existing_community_with_new_community(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Rainer", "Everest"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP community Everest added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_COMMUNITY", "Everest") == \ + expected_snmp_community_replace_existing_community_with_new_community_output + + def test_config_snmp_community_replace_existing_community_non_existing_community(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Denali", "Everest"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Current SNMP community Denali is not configured' in result.output + + def test_config_snmp_community_replace_new_community_already_exists(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Rainer", "msft"]) + print(result.exit_code) + assert result.exit_code == 3 + assert 'New SNMP community msft to replace current SNMP community Rainer already configured' in result.output + + def test_config_snmp_community_replace_with_invalid_new_community_bad_symbol(self): + runner = CliRunner() + result = 
runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Rainer", "msft@"]) + print(result.exit_code) + assert result.exit_code == 2 + assert 'FAILED: SNMP community string should not have any of these special symbols' in result.output + + def test_config_snmp_community_replace_with_invalid_new_community_over_32_chars(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Rainer", "over_32_characters_community_string"]) + print(result.exit_code) + assert result.exit_code == 2 + assert 'FAILED: SNMP community string length should be not be greater than 32' in result.output + + + # Del snmp contact when CONTACT not setup in REDIS + def test_config_snmp_contact_del_without_contact_redis(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["del"], ["blah"], obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert 'Contact name blah is not configured' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {} + + def test_config_snmp_contact_modify_without_contact_redis(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["blah", "blah@contoso.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 3 + assert 'Contact name blah is not configured' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {} + + def test_config_snmp_contact_add_del_new_contact(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["del"], + ["testuser"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert 'SNMP contact testuser removed from configuration' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {} + + # Add snmp contact tests + def test_config_snmp_contact_add_with_existing_contact(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["blah", "blah@contoso.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Contact already exists. 
Use sudo config snmp contact modify instead' in result.output + + def test_config_snmp_contact_add_invalid_email(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testusercontoso.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert "Contact email testusercontoso.com is not valid" in result.output + + + # Delete snmp contact tests + def test_config_snmp_contact_del_new_contact_when_contact_exists(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["del"], ["blah"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP contact blah is not configured' in result.output + + def test_config_snmp_contact_del_with_existing_contact(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["del"], + ["testuser"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP contact testuser removed from configuration' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {} + + # Modify snmp contact tests + def test_config_snmp_contact_modify_email_with_existing_contact(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["testuser", "testuser@test.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP contact testuser email updated to testuser@test.com' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@test.com"} + + def test_config_snmp_contact_modify_contact_and_email_with_existing_entry(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code 
== 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP contact testuser testuser@contoso.com already exists' in result.output + + def test_config_snmp_contact_modify_existing_contact_with_invalid_email(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["testuser", "testuser@contosocom"], obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert 'Contact email testuser@contosocom is not valid' in result.output + + + def test_config_snmp_contact_modify_new_contact_with_invalid_email(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["blah", "blah@contoso@com"], obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert 'Contact email blah@contoso@com is not valid' in result.output + + # Add snmp location tests + def test_config_snmp_location_add_exiting_location_with_same_location_already_existing(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Location already exists' in result.output + + def test_config_snmp_location_add_new_location_with_location_already_existing(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + 
["Mile High"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Location already exists' in result.output + + # Del snmp location tests + def test_config_snmp_location_del_with_existing_location(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["del"], + ["public"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP Location public removed from configuration' in result.output + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {} + + def test_config_snmp_location_del_new_location_with_location_already_existing(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["del"], + ["Mile High"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP Location Mile High does not exist. The location is public' in result.output + + # Modify snmp location tests + def test_config_snmp_location_modify_with_same_location(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["modify"], + ["public"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP location public already exists' in result.output + + def test_config_snmp_location_modify_without_redis(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["modify"], + ["Rainer"],obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert "Cannot modify SNMP Location. 
You must use 'config snmp location add " \ + "command '" in result.output + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {} + + def test_config_snmp_location_modify_without_existing_location(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["modify"], + ["Rainer"],obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert "SNMP location Rainer modified in configuration" in result.output + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "Rainer"} + + # Add snmp user tests + def test_config_snmp_user_add_invalid_user_name_over_32_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["over_32_characters_community_user", "noAUthNoPRiv", "ro"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'FAILED: SNMP user over_32_characters_community_user length should not be greater than 32 characters' \ + in result.output + + def test_config_snmp_user_add_excluded_special_characters_in_username(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["Test@user", "noAUthNoPRiv", "ro"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'FAILED: SNMP user Test@user should not have any of these special symbols' in result.output + + def test_config_snmp_user_add_existing_user(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_1", "noAUthNoPRiv", "ro"]) + print(result.exit_code) + assert result.exit_code == 14 + assert 'SNMP user test_nopriv_RO_1 is already configured' in result.output + + def test_config_snmp_user_add_invalid_user_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "nopriv", "ro"]) + print(result.exit_code) + print(result) + print(result.output) + assert result.exit_code == 2 + assert "Invalid user type. Must be one of these one of these three 'noauthnopriv' or 'authnopriv' or 'priv'" in result.output + + def test_config_snmp_user_add_invalid_permission_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "noauthnopriv", "ab"]) + print(result.exit_code) + assert result.exit_code == 3 + assert "Invalid community type. Must be either RO or RW" in result.output + + def test_config_snmp_user_add_user_type_noauthnopriv_with_unnecessary_auth_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "noauthnopriv", "ro", "sha"]) + print(result.exit_code) + assert result.exit_code == 4 + assert "User auth type not used with 'noAuthNoPriv'. 
Please use 'AuthNoPriv' or 'Priv' instead" in result.output + + def test_config_snmp_user_add_user_type_authnopriv_missing_auth_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro"]) + print(result.exit_code) + assert result.exit_code == 5 + assert "User auth type is missing. Must be MD5, SHA, or HMAC-SHA-2" in result.output + + def test_config_snmp_user_add_user_type_authnopriv_missing_auth_password(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro", "sha"]) + print(result.exit_code) + assert result.exit_code == 7 + assert "User auth password is missing" in result.output + + def test_config_snmp_user_add_user_type_authnopriv_with_unnecessary_encrypt_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro", "sha", "testauthpass", "DES"]) + print(result.exit_code) + assert result.exit_code == 9 + assert "User encrypt type not used with 'AuthNoPriv'. Please use 'Priv' instead" in result.output + + def test_config_snmp_user_add_user_type_priv_missing_auth_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro"]) + print(result.exit_code) + assert result.exit_code == 5 + assert "User auth type is missing. Must be MD5, SHA, or HMAC-SHA-2" in result.output + + def test_config_snmp_user_add_user_type_priv_missing_auth_password(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5"]) + print(result.exit_code) + assert result.exit_code == 7 + assert "User auth password is missing" in result.output + + def test_config_snmp_user_add_user_type_priv_missing_encrypt_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5", "testauthpass"]) + print(result.exit_code) + assert result.exit_code == 10 + assert "User encrypt type is missing. 
Must be DES or AES" in result.output + + def test_config_snmp_user_add_user_type_priv_invalid_encrypt_password_over_64_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5", "testauthpass", "DES", + "superlongencryptionpasswordtotestbeingoverthesixtyfourcharacterlimit"]) + print(result.exit_code) + assert result.exit_code == 13 + assert "FAILED: SNMP user password length should be not be greater than 64" in result.output + + def test_config_snmp_user_add_user_type_priv_invalid_encrypt_password_excluded_special_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5", "testauthpass", "DES", "testencrypt@pass"]) + print(result.exit_code) + assert result.exit_code == 13 + assert "FAILED: SNMP user password should not have any of these special symbols" in result.output + + def test_config_snmp_user_add_user_type_priv_invalid_encrypt_password_not_long_enough(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5", "testauthpass", "DES", "test1"]) + print(result.exit_code) + assert result.exit_code == 13 + assert "FAILED: SNMP user password length should be at least 8 characters" in result.output + + def test_config_snmp_user_add_invalid_auth_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro", "DM5", "user_auth_pass"]) + print(result.exit_code) + assert result.exit_code == 6 + assert "Invalid user authentication type. Must be one of these 'MD5', 'SHA', or 'HMAC-SHA-2'" in result.output + + def test_config_snmp_user_add_missing_auth_password(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro", "SHA", ""]) + print(result.exit_code) + assert result.exit_code == 7 + assert 'User auth password is missing' in result.output + + def test_config_snmp_user_add_invalid_encrypt_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "SHA", "user_auth_pass", "EAS", "user_encrypt_pass"]) + print(result.exit_code) + assert result.exit_code == 11 + assert "Invalid user encryption type. 
Must be one of these two 'DES' or 'AES'" in result.output + + def test_config_snmp_user_add_missing_encrypt_password(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "SHA", "user_auth_pass", "AES"]) + print(result.exit_code) + assert result.exit_code == 12 + assert 'User encrypt password is missing' in result.output + + def test_config_snmp_user_add_user_already_existing(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_1", "noauthnopriv", "ro"]) + print(result.exit_code) + assert result.exit_code == 14 + assert 'SNMP user test_nopriv_RO_1 is already configured' in result.output + + def test_config_snmp_user_add_valid_user_priv_ro_md5_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_7", "priv", "ro", "MD5", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_7 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_7") == expected_snmp_user_priv_ro_md5_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_md5_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_8", "priv", "ro", "MD5", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_8 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_8") == expected_snmp_user_priv_ro_md5_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_sha_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_9", "priv", "ro", "SHA", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_9 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_9") == expected_snmp_user_priv_ro_sha_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_sha_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_10", "priv", "ro", "SHA", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_10 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_10") == expected_snmp_user_priv_ro_sha_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_hmac_sha_2_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_11", "priv", "ro", 
"HMAC-SHA-2", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_11 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_11") == \ + expected_snmp_user_priv_ro_hmac_sha_2_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_hmac_sha_2_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_12", "priv", "ro", "HMAC-SHA-2", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_12 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_12") == \ + expected_snmp_user_priv_ro_hmac_sha_2_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_md5_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_7", "priv", "rw", "MD5", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_7 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_7") == expected_snmp_user_priv_rw_md5_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_md5_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_8", "priv", "rw", "MD5", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_8 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_8") == expected_snmp_user_priv_rw_md5_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_sha_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_9", "priv", "rw", "SHA", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_9 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_9") == expected_snmp_user_priv_rw_sha_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_sha_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_10", "priv", "rw", "SHA", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_10 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_10") == expected_snmp_user_priv_rw_sha_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_hmac_sha_2_des(self): + db = Db() + runner = CliRunner() + with 
mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_11", "priv", "rw", "HMAC-SHA-2", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_11 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_11") == \ + expected_snmp_user_priv_rw_hmac_sha_2_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_hmac_sha_2_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_12", "priv", "rw", "HMAC-SHA-2", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_12 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_12") == \ + expected_snmp_user_priv_rw_hmac_sha_2_aes_config_db_output + + # Del snmp user tests + def test_config_snmp_user_del_valid_user(self): + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_nopriv_RO_1"]) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_nopriv_RO_1 removed from configuration' in result.output + + def test_config_snmp_user_del_invalid_user(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_nopriv_RO_2"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP user test_nopriv_RO_2 is not configured' in result.output + + @pytest.mark.parametrize("invalid_email", ['test@contoso', 'test.contoso.com', 'testcontoso@com', + '123_%contoso.com', 'mytest@contoso.comm']) + def test_is_valid_email(self, invalid_email): + output = config.is_valid_email(invalid_email) + assert output == False + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index f8ceebffbf..6c554f8f98 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -695,6 +695,172 @@ "peer_switch": "sonic-switch", "type": "ToRRouter" }, + "SNMP_COMMUNITY|msft": { + "TYPE": "RO" + }, + "SNMP_COMMUNITY|Rainer": { + "TYPE": "RW" + }, + "SNMP_USER|test_authpriv_RO_2": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RO_2_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_authpriv_RO_3": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RO_3_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RW_4": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RW_4_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_4_encrpytpass" + }, + "SNMP_USER|test_priv_RW_3": { + "SNMP_USER_TYPE": 
"Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RW_3_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_3_encrpytpass" + }, + "SNMP_USER|test_priv_RO_2": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_2_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_2_encrpytpass" + }, + "SNMP_USER|test_nopriv_RO_1": { + "SNMP_USER_TYPE": "noAuthNoPriv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "", + "SNMP_USER_AUTH_PASSWORD": "", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RW_1": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_1_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_1_encrpytpass" + }, + "SNMP_USER|test_authpriv_RW_1": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RW_1_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RO_6": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_6_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_6_encrpytpass" + }, + "SNMP_USER|test_priv_RO_1": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_1_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_1_encrpytpass" + }, + "SNMP_USER|test_priv_RO_5": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_5_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_5_encrpytpass" + }, + "SNMP_USER|test_nopriv_RW_1": { + "SNMP_USER_TYPE": "noAuthNoPriv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "", + "SNMP_USER_AUTH_PASSWORD": "", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RO_3": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_3_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_3_encrpytpass" + }, + "SNMP_USER|test_priv_RW_2": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_2_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_2_encrpytpass" + }, + "SNMP_USER|test_authpriv_RW_3": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RW_3_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RW_5": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RW_5_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": 
"test_priv_RW_5_encrpytpass" + }, + "SNMP_USER|test_priv_RW_6": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RW_6_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_6_encrpytpass" + }, + "SNMP_USER|test_authpriv_RW_2": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RW_2_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RO_4": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_4_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_4_encrpytpass" + }, + "SNMP_USER|test_authpriv_RO_1": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RO_1_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, "DEVICE_NEIGHBOR|Ethernet0": { "name": "Servers", "port": "eth0" diff --git a/tests/show_snmp_test.py b/tests/show_snmp_test.py new file mode 100644 index 0000000000..753e20c418 --- /dev/null +++ b/tests/show_snmp_test.py @@ -0,0 +1,467 @@ +import sys +import os +import click +from click.testing import CliRunner +import pytest +import swsssdk +import traceback + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +import show.main as show +import clear.main as clear +import config.main as config + +import mock_tables.dbconnector + +from unittest import mock +from unittest.mock import patch +from utilities_common.db import Db + +config_snmp_location_add_new_location ="""\ +SNMP Location public has been added to configuration +Restarting SNMP service... +""" + +config_snmp_contact_add_del_new_contact ="""\ +Contact name testuser and contact email testuser@contoso.com have been added to configuration +Restarting SNMP service... 
+""" + +tabular_data_show_run_snmp_contact_expected = """\ +Contact Contact Email\n--------- --------------------\ntestuser testuser@contoso.com +""" + +json_data_show_run_snmp_contact_expected = """\ +{'testuser': 'testuser@contoso.com'} +""" + +tabular_data_show_run_snmp_community_expected = """\ +Community String Community Type +------------------ ---------------- +Rainer RW +msft RO +""" + +json_data_show_run_snmp_community_expected = """\ +{'msft': {'TYPE': 'RO'}, 'Rainer': {'TYPE': 'RW'}} +""" + +tabular_data_show_run_snmp_location_expected = """\ +Location +---------- +public +""" + +json_data_show_run_snmp_location_expected = """\ +{'Location': 'public'} +""" + + +tabular_data_show_run_snmp_user_expected = """\ +User Permission Type Type Auth Type Auth Password Encryption Type Encryption Password +------------------ ----------------- ------------ ----------- --------------------------- ----------------- -------------------------- +test_authpriv_RO_1 RO AuthNoPriv MD5 test_authpriv_RO_1_authpass +test_authpriv_RO_2 RO AuthNoPriv SHA test_authpriv_RO_2_authpass +test_authpriv_RO_3 RO AuthNoPriv HMAC-SHA-2 test_authpriv_RO_3_authpass +test_authpriv_RW_1 RW AuthNoPriv MD5 test_authpriv_RW_1_authpass +test_authpriv_RW_2 RW AuthNoPriv SHA test_authpriv_RW_2_authpass +test_authpriv_RW_3 RW AuthNoPriv HMAC-SHA-2 test_authpriv_RW_3_authpass +test_nopriv_RO_1 RO noAuthNoPriv +test_nopriv_RW_1 RW noAuthNoPriv +test_priv_RO_1 RO Priv MD5 test_priv_RO_1_authpass DES test_priv_RO_1_encrpytpass +test_priv_RO_2 RO Priv MD5 test_priv_RO_2_authpass AES test_priv_RO_2_encrpytpass +test_priv_RO_3 RO Priv SHA test_priv_RO_3_authpass DES test_priv_RO_3_encrpytpass +test_priv_RO_4 RO Priv SHA test_priv_RO_4_authpass AES test_priv_RO_4_encrpytpass +test_priv_RO_5 RO Priv HMAC-SHA-2 test_priv_RO_5_authpass DES test_priv_RO_5_encrpytpass +test_priv_RO_6 RO Priv HMAC-SHA-2 test_priv_RO_6_authpass AES test_priv_RO_6_encrpytpass +test_priv_RW_1 RW Priv MD5 test_priv_RO_1_authpass DES test_priv_RW_1_encrpytpass +test_priv_RW_2 RW Priv MD5 test_priv_RO_2_authpass AES test_priv_RW_2_encrpytpass +test_priv_RW_3 RW Priv SHA test_priv_RW_3_authpass DES test_priv_RW_3_encrpytpass +test_priv_RW_4 RW Priv SHA test_priv_RW_4_authpass AES test_priv_RW_4_encrpytpass +test_priv_RW_5 RW Priv HMAC-SHA-2 test_priv_RW_5_authpass DES test_priv_RW_5_encrpytpass +test_priv_RW_6 RW Priv HMAC-SHA-2 test_priv_RW_6_authpass AES test_priv_RW_6_encrpytpass +""" + + + + +json_data_show_run_snmp_user_expected = """{'test_authpriv_RO_2': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RO_2_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_authpriv_RO_3': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RO_3_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RW_4': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RW_4_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_4_encrpytpass'}, 'test_priv_RW_3': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RW_3_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_3_encrpytpass'}, 'test_priv_RO_2': 
{'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_2_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_2_encrpytpass'}, 'test_nopriv_RO_1': {'SNMP_USER_TYPE': 'noAuthNoPriv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': '', 'SNMP_USER_AUTH_PASSWORD': '', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RW_1': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_1_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_1_encrpytpass'}, 'test_authpriv_RW_1': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RW_1_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RO_6': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_6_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_6_encrpytpass'}, 'test_priv_RO_1': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_1_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_1_encrpytpass'}, 'test_priv_RO_5': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_5_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_5_encrpytpass'}, 'test_nopriv_RW_1': {'SNMP_USER_TYPE': 'noAuthNoPriv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': '', 'SNMP_USER_AUTH_PASSWORD': '', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RO_3': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_3_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_3_encrpytpass'}, 'test_priv_RW_2': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_2_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_2_encrpytpass'}, 'test_authpriv_RW_3': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RW_3_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RW_5': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RW_5_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_5_encrpytpass'}, 'test_priv_RW_6': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RW_6_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_6_encrpytpass'}, 'test_authpriv_RW_2': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RW_2_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RO_4': {'SNMP_USER_TYPE': 'Priv', 
'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_4_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_4_encrpytpass'}, 'test_authpriv_RO_1': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RO_1_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}} +""" + +tabular_data_show_run_snmp_expected = """\ +Location +---------- +public + + +SNMP_CONTACT SNMP_CONTACT_EMAIL +-------------- -------------------- +testuser testuser@contoso.com + + +Community String Community Type +------------------ ---------------- +Rainer RW +msft RO + + +User Permission Type Type Auth Type Auth Password Encryption Type Encryption Password +------------------ ----------------- ------------ ----------- --------------------------- ----------------- -------------------------- +test_authpriv_RO_1 RO AuthNoPriv MD5 test_authpriv_RO_1_authpass +test_authpriv_RO_2 RO AuthNoPriv SHA test_authpriv_RO_2_authpass +test_authpriv_RO_3 RO AuthNoPriv HMAC-SHA-2 test_authpriv_RO_3_authpass +test_authpriv_RW_1 RW AuthNoPriv MD5 test_authpriv_RW_1_authpass +test_authpriv_RW_2 RW AuthNoPriv SHA test_authpriv_RW_2_authpass +test_authpriv_RW_3 RW AuthNoPriv HMAC-SHA-2 test_authpriv_RW_3_authpass +test_nopriv_RO_1 RO noAuthNoPriv +test_nopriv_RW_1 RW noAuthNoPriv +test_priv_RO_1 RO Priv MD5 test_priv_RO_1_authpass DES test_priv_RO_1_encrpytpass +test_priv_RO_2 RO Priv MD5 test_priv_RO_2_authpass AES test_priv_RO_2_encrpytpass +test_priv_RO_3 RO Priv SHA test_priv_RO_3_authpass DES test_priv_RO_3_encrpytpass +test_priv_RO_4 RO Priv SHA test_priv_RO_4_authpass AES test_priv_RO_4_encrpytpass +test_priv_RO_5 RO Priv HMAC-SHA-2 test_priv_RO_5_authpass DES test_priv_RO_5_encrpytpass +test_priv_RO_6 RO Priv HMAC-SHA-2 test_priv_RO_6_authpass AES test_priv_RO_6_encrpytpass +test_priv_RW_1 RW Priv MD5 test_priv_RO_1_authpass DES test_priv_RW_1_encrpytpass +test_priv_RW_2 RW Priv MD5 test_priv_RO_2_authpass AES test_priv_RW_2_encrpytpass +test_priv_RW_3 RW Priv SHA test_priv_RW_3_authpass DES test_priv_RW_3_encrpytpass +test_priv_RW_4 RW Priv SHA test_priv_RW_4_authpass AES test_priv_RW_4_encrpytpass +test_priv_RW_5 RW Priv HMAC-SHA-2 test_priv_RW_5_authpass DES test_priv_RW_5_encrpytpass +test_priv_RW_6 RW Priv HMAC-SHA-2 test_priv_RW_6_authpass AES test_priv_RW_6_encrpytpass +""" + + +class TestSNMPShowCommands(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + # mock the redis for unit test purposes # + try: + if os.environ["UTILITIES_UNIT_TESTING"] == "1": + modules_path = os.path.join(os.path.dirname(__file__), "..") + test_path = os.path.join(modules_path, "sonic-utilities-tests") + sys.path.insert(0, modules_path) + sys.path.insert(0, test_path) + import mock_tables.dbconnector + except KeyError: + pass + + def test_show_run_snmp_location_tabular(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = 
runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"], + [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == tabular_data_show_run_snmp_location_expected + + def test_show_run_snmp_location_json(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"], + ["--json"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == json_data_show_run_snmp_location_expected + + def test_show_run_snmp_location_json_bad_key(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"], ["--json"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "{}" in result.output + + + def test_show_run_snmp_location_bad_key(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"], []) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "" in result.output + + def test_show_run_snmp_contact_tabular(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["contact"], + [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == tabular_data_show_run_snmp_contact_expected + + def test_show_run_snmp_contact_json(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["contact"], + ["--json"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == json_data_show_run_snmp_contact_expected + + def test_show_run_snmp_contact_json_bad_key(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["contact"], ["--json"]) + print(result.exit_code) + 
print(result.output)
+        traceback.print_tb(result.exc_info[2])
+        assert result.exit_code == 0
+        assert '{}' in result.output
+
+    def test_show_run_snmp_contact_tabular_bad_key(self):
+        runner = CliRunner()
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["contact"])
+        print(result.exit_code)
+        print(result.output)
+        traceback.print_tb(result.exc_info[2])
+        assert result.exit_code == 0
+        assert '' in result.output
+
+    def test_show_run_snmp_community_tabular(self):
+        runner = CliRunner()
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["community"], [])
+        print(result.exit_code)
+        print(result.output)
+        assert result.exit_code == 0
+        assert result.output == tabular_data_show_run_snmp_community_expected
+
+    def test_show_run_snmp_community_json(self):
+        runner = CliRunner()
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["community"],
+                               ["--json"])
+        print(result.exit_code)
+        print(result.output)
+        assert result.exit_code == 0
+        assert result.output == json_data_show_run_snmp_community_expected
+
+    def test_show_run_snmp_user_tabular(self):
+        runner = CliRunner()
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["user"], [])
+        print(result.exit_code)
+        print(result.output)
+        assert result.exit_code == 0
+        assert result.output == tabular_data_show_run_snmp_user_expected
+
+    def test_show_run_snmp_user_json(self):
+        runner = CliRunner()
+        result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["user"], ["--json"])
+        print(result.exit_code)
+        print(result.output)
+        assert result.exit_code == 0
+        assert result.output == json_data_show_run_snmp_user_expected
+
+    def test_show_run_snmp_user_json_bad_key(self):
+        db = Db()
+        runner = CliRunner()
+        users = ["test_authpriv_RO_1", "test_authpriv_RO_2", "test_authpriv_RO_3",
+                 "test_authpriv_RW_1", "test_authpriv_RW_2", "test_authpriv_RW_3",
+                 "test_nopriv_RO_1", "test_nopriv_RW_1",
+                 "test_priv_RO_1", "test_priv_RO_2", "test_priv_RO_3",
+                 "test_priv_RO_4", "test_priv_RO_5", "test_priv_RO_6",
+                 "test_priv_RW_1", "test_priv_RW_2", "test_priv_RW_3",
+                 "test_priv_RW_4", "test_priv_RW_5", "test_priv_RW_6"]
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            for user in users:
+                result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"],
+                                       [user], obj=db)
+                print(result.exit_code)
+                assert result.exit_code == 0
+                assert 'SNMP user {} removed from configuration'.format(user) in result.output
+
+            result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["user"],
+                                   ["--json"], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            traceback.print_tb(result.exc_info[2])
+            assert result.exit_code == 0
+            assert "{}" in result.output
+
+    def test_show_run_snmp_tabular(self):
+        db = Db()
+        runner = CliRunner()
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"],
+                                   ["testuser", "testuser@contoso.com"], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            assert result.exit_code == 0
+            assert result.output == config_snmp_contact_add_del_new_contact
+            assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"}
+
+            result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"],
+                                   ["public"], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            assert result.exit_code == 0
+            assert result.output == config_snmp_location_add_new_location
+            assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"}
+
+            result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"], [], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            assert result.exit_code == 0
+            assert result.output == tabular_data_show_run_snmp_expected
+
+    @classmethod
+    def teardown_class(cls):
+        print("TEARDOWN")
+        os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
+        os.environ["UTILITIES_UNIT_TESTING"] = "0"
+
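All of the new tests above share one pattern: construct a `Db()` backed by the `tests/mock_tables` fixtures, invoke the Click command tree in-process through `click.testing.CliRunner`, and assert on `result.exit_code` and `result.output`. Below is a minimal self-contained sketch of that pattern; the wrapper function name is illustrative, while the imports and command path mirror those used by the tests.

```python
from click.testing import CliRunner

import show.main as show
from utilities_common.db import Db


def check_show_snmp_location():
    """Drive 'show runningconfiguration snmp location' in-process (no device needed)."""
    runner = CliRunner()
    db = Db()  # picks up the mocked ConfigDB when UTILITIES_UNIT_TESTING=1
    cmd = show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"]
    result = runner.invoke(cmd, [], obj=db)  # CliRunner captures output and exceptions
    return result.exit_code == 0, result.output
```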