Skip to content

Commit

Permalink
Merge pull request #202 from sassoftware/develop
Browse files Browse the repository at this point in the history
Merge develop to main
  • Loading branch information
kevinlinglesas authored Jul 11, 2023
2 parents 19eb21b + 2d3d973 commit 2096d17
Show file tree
Hide file tree
Showing 24 changed files with 227 additions and 827 deletions.
60 changes: 60 additions & 0 deletions deployment_report/model/utils/config_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,11 @@
_PGCLUSTER_SSL_KEY_ = "ssl"
_PGCLUSTER_CONNECT_KEY_ = "connection"

# SAS postgres dataserver keys
_DATASERVER_REGISTRATIONS_KEY_ = "registrations"
_DATASERVER_DATABASES_KEY_ = "databases"
_DATASERVER_NAME_KEY_ = "name"

# database info values
_EXTERNAL_DB_ = "External"
_INTERNAL_DB_ = "Internal"
Expand All @@ -53,6 +58,7 @@

# database name key values
_DBNAME_POSTGRES_ = "postgres"
_DBNAME_DATASERVERS_POSTGRES_ = "sas-platform-postgres"
_DBNAME_CONFIG_POSTGRES_ = "sas-postgres-config"
_DBNAME_CPSPOSTGRES_ = "cpspostgres"
_DBNAME_CONFIG_CPSPOSTGRES_ = "sas-planning-cpspostgres-config"
Expand Down Expand Up @@ -121,6 +127,10 @@ def get_db_info(resource_cache: Dict) -> Dict:
pgclusters: Dict = resource_cache[ResourceTypeValues.SAS_CRUNCHYCLUSTERS][ITEMS_KEY]
db_dict = _get_db_info_v3(pgclusters)

if not db_dict and ResourceTypeValues.SAS_DATASERVERS in resource_cache.keys():
dataservers: Dict = resource_cache[ResourceTypeValues.SAS_DATASERVERS][ITEMS_KEY]
db_dict = _get_db_info_v4(dataservers)

if not db_dict and ResourceTypeValues.SAS_PGCLUSTERS in resource_cache.keys():
pgclusters: Dict = resource_cache[ResourceTypeValues.SAS_PGCLUSTERS][ITEMS_KEY]
db_dict = _get_db_info_v2(pgclusters)
Expand Down Expand Up @@ -292,6 +302,56 @@ def _get_db_info_v2(pgclusters: Dict) -> Dict:
return db_dict


def _get_db_info_v4(pgclusters: Dict) -> Dict:
    """
    Returns the db information of the targeted SAS deployment after evaluating the DataServer
    resources in the Viya deployment.

    Note: A DataServer resource encapsulates the connection info & credentials for a single
    Postgres server, which can be either internal or external. The DataServer resource names
    will always be either "sas-platform-postgres" or "sas-cds-postgres".
    "sas-platform-postgres" will always exist, whereas "sas-cds-postgres" only exists if certain
    products are included in the Viya orderable.

    :param pgclusters: The DataServer resources to evaluate, keyed by resource name.
                       (Named "pgclusters" for consistency with the sibling _get_db_info_v* helpers.)
    :return: A dictionary mapping each DataServer name to its db connection details.
    """
    # initialize the return value
    db_dict: Dict = dict()

    for key in pgclusters:
        try:
            resource_definition = pgclusters[key][Keys.ResourceDetails.RESOURCE_DEFINITION]

            db_name: Optional[Text] = resource_definition.get_name()
            db_data: Optional[Dict] = resource_definition.get_spec()

            # a DataServer without a spec provides no connection details to report
            if not db_data:
                continue

            try:
                dbs: Dict = {
                    Keys.DatabaseDetails.DBNAME: db_data[_DATASERVER_DATABASES_KEY_][0][_DATASERVER_NAME_KEY_],
                    Keys.DatabaseDetails.DBSSL: db_data[_PGCLUSTER_SSL_KEY_],
                    Keys.DatabaseDetails.DBHOST: db_data[_DATASERVER_REGISTRATIONS_KEY_][0][_PGCLUSTER_HOST_KEY_],
                    Keys.DatabaseDetails.DBPORT: db_data[_DATASERVER_REGISTRATIONS_KEY_][0][_PGCLUSTER_PORT_KEY_],
                }
            except KeyError:
                # expected keys missing from the spec: report the connection as unavailable
                dbs = {
                    Keys.DatabaseDetails.DBCONN: _UNAVAIL_DB_
                }

            # name is always "sas-platform-postgres" or "sas-cds-postgres".
            db_dict[db_name] = dbs
        except KeyError:
            # resource entry without a resource definition: nothing to report for it
            continue

    return db_dict


def get_configmaps_info(resource_cache: Dict) -> Optional[Dict]:
"""
Returns the configmaps of the targeted SAS deployment.
Expand Down
20 changes: 20 additions & 0 deletions deployment_report/model/utils/test/test_config_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,26 @@ def test_get_db_info_v2(no_ingress_simulation_fixture: conftest.DSA):
assert db_dict[config_util._DBNAME_POSTGRES_][Keys.DatabaseDetails.DBTYPE] == KubectlTest.Values.DB_External


@pytest.mark.usefixtures(conftest.NO_INGRESS_SIMULATION_FIXTURE)
def test_get_db_info_v4(no_ingress_simulation_fixture: conftest.DSA):
    """
    This test verifies that the provided db data is returned when valid values are passed to
    _get_db_info_v4().
    """
    # get the test resource cache
    resource_cache: Dict = no_ingress_simulation_fixture.resource_cache()

    pgclusters: Dict = resource_cache[ResourceTypeValues.SAS_DATASERVERS][ITEMS_KEY]
    db_dict: Dict = config_util._get_db_info_v4(pgclusters=pgclusters)

    # KeyError expected because DBTYPE is not set in db info with ResourceTypeValues.SAS_DATASERVERS
    with pytest.raises(KeyError):
        assert db_dict[config_util._DBNAME_DATASERVERS_POSTGRES_][Keys.DatabaseDetails.DBTYPE] == \
            KubectlTest.Values.DB_External

    # DBPORT, however, is populated from the DataServer registration; this assertion must stay
    # outside the pytest.raises block above so that it actually executes
    assert db_dict[config_util._DBNAME_DATASERVERS_POSTGRES_][Keys.DatabaseDetails.DBPORT] == \
        KubectlTest.Values.DB_DATASERVER_PORT


@pytest.mark.usefixtures(conftest.NO_INGRESS_SIMULATION_FIXTURE)
def test_get_db_info_v3(no_ingress_simulation_fixture: conftest.DSA):
"""
Expand Down
69 changes: 0 additions & 69 deletions pre_install_report/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -91,53 +91,6 @@ The following command provides usage details:
python3 viya-ark.py pre-install-report -h
```

### Supported Ingress Values
The tool currently supports the following ingress controllers: _nginx, openshift_.
Other ingress controllers are not evaluated. Select _openshift_ if you are deploying on Red Hat OpenShift.

### Hints
**Note:** The Ingress Host and Port values are not required if you specify an ingress value
of _openshift_. They must be specified if you specify an ingress
value of _nginx_.

The values for the Ingress Host and Ingress Port options can be determined with kubectl commands.

The following section provides hints for a _nginx_ ingress controller of Type LoadBalancer.
The following commands may need to be modified to suit your ingress controller deployment.

You must specify the namespace where the ingress controller is available as well as the ingress controller name:

```
kubectl -n <nginx-ingress-namespace> get svc <nginx-ingress-controller-name>
```


Here is sample output from the command:

```
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx-controller LoadBalancer 10.0.00.000 55.147.22.101 80:31254/TCP,443:31383/TCP 28d
```

Use the following commands to determine the parameter values:

```
export INGRESS_HOST=$(kubectl -n <ingress-namespace> get service <nginx-ingress-controller-name> -o jsonpath='{.status.loadBalancer.ingress[*].ip}')
export INGRESS_HTTP_PORT=$(kubectl -n <ingress-namespace> get service <nginx-ingress-controller-name> -o jsonpath='{.spec.ports[?(@.name=="http")].port}')
export INGRESS_HTTPS_PORT=$(kubectl -n <ingress-namespace> get service <nginx-ingress-controller-name> -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
```
The command to determine the Ingress Host may be slightly different with Amazon Elastic Kubernetes Service(EKS):
```
export INGRESS_HOST=externalIP=$(kubectl -n <ingress-namespace> get service <nginx-ingress-controller-name> -o jsonpath='{.status.loadBalancer.ingress[*].hostname}')
```

Use the values gathered on the command line for http or https as appropriate for your deployment:

```
python3 viya-ark.py pre-install-report -i nginx -H $INGRESS_HOST -p $INGRESS_HTTP_PORT
python3 viya-ark.py pre-install-report -i nginx -H $INGRESS_HOST -p $INGRESS_HTTPS_PORT
```

## Report Output

The tool generates the pre-install check report, viya_pre_install_report_<timestamp>.html. The report is in a
Expand All @@ -151,25 +104,3 @@ minimum and aggregate settings for CPU and memory on nodes. For more information
If you modify the VIYA_K8S_VERSION_MIN to a version less than the minimum Kubernetes version supported by this
release of the report tool, you are operating outside the supported capabilities of the report tool. SAS recommends
using a release of SAS Viya 4 ARK tools that matches the required minimum you are working with.

## Known Issues

The following issue may impact the performance and expected results of this tool.
- All Nodes in a cluster must be in the READY state before running the tool.
- If any of the Nodes are not in the READY state, the tool takes longer to run. Wait for it to complete.
Also, the tool may not be able to clean up the pods and replicaset created in the specified namespace as shown in
the example output below. If that happens, the pods and replicaset must be manually deleted.
They will look similar to the resources shown below:
```
NAME READY STATUS RESTARTS AGE
pod/hello-world-6665cf748b-5x2jq 0/1 Pending 0 115m
pod/hello-world-6665cf748b-tkq79 0/1 Pending 0 115m
NAME DESIRED CURRENT READY AGE
replicaset.apps/hello-world-6665cf748b 2 2 0 115m
Suggested commands to delete resources before running the tool again:
kubectl -n <namespace> delete replicaset.apps/hello-world-6665cf748b
kubectl -n <namespace> delete pod/hello-world-6665cf748b-5x2jq
kubectl -n <namespace> delete pod/hello-world-6665cf748b-tkq79
```
26 changes: 2 additions & 24 deletions pre_install_report/library/pre_install_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,6 @@ def __init__(self, sas_logger: ViyaARKLogger, viya_k8s_version_min,
self._calculated_aggregate_memory = None
self._workers = 0
self._aggregate_nodeStatus_failures = 0
self._ingress_controller = None
self._k8s_server_version = None

def _parse_release_info(self, release_info):
Expand Down Expand Up @@ -160,9 +159,8 @@ def _k8s_server_version_min(self):
self.logger.exception(viya_messages.EXCEPTION_MESSAGE.format(str(cpe)))
sys.exit(viya_messages.RUNTIME_ERROR_RC_)

def check_details(self, kubectl, ingress_port, ingress_host, ingress_controller,
def check_details(self, kubectl,
output_dir):
self._ingress_controller = ingress_controller
self._kubectl = kubectl
name_space = kubectl.get_namespace()
self.logger.info("names_space: {} ".format(name_space))
Expand Down Expand Up @@ -197,9 +195,6 @@ def check_details(self, kubectl, ingress_port, ingress_host, ingress_controller,
global_data = self.evaluate_nodes(nodes_data, global_data, cluster_info, quantity_)

params = {}
params[viya_constants.INGRESS_CONTROLLER] = ingress_controller
params[viya_constants.INGRESS_HOST] = str(ingress_host)
params[viya_constants.INGRESS_PORT] = str(ingress_port)
params[viya_constants.PERM_CLASS] = utils
params[viya_constants.SERVER_K8S_VERSION] = self._k8s_server_version
params['logger'] = self.sas_logger
Expand All @@ -214,13 +209,10 @@ def check_details(self, kubectl, ingress_port, ingress_host, ingress_controller,
self.logger.warn("WARN: Review Cluster Aggregate Report")
if(any(ele in str(permissions_check.get_namespace_admin_permission_aggregate()) for ele in test_list)):
self.logger.warn("WARN: Review Namespace Aggregate Report")
if(any(ele in str(permissions_check.get_namespace_admin_permission_data()) for ele in test_list)):
self.logger.warn("WARN: Review Namespace Permissions")

self.generate_report(global_data, master_data, configs_data, storage_data, namespace_data,
permissions_check.get_cluster_admin_permission_data(),
permissions_check.get_namespace_admin_permission_data(),
permissions_check.get_ingress_data(),
permissions_check.get_namespace_admin_permission_aggregate(),
permissions_check.get_cluster_admin_permission_aggregate(),
output_dir)
Expand Down Expand Up @@ -474,27 +466,15 @@ def _check_permissions(self, permissions_check: PreCheckPermissions):
permissions_check.get_sc_resources()

permissions_check.manage_pvc(viya_constants.KUBECTL_APPLY, False)
permissions_check.check_sample_application()
permissions_check.check_sample_ingress()
if self._ingress_controller == viya_constants.OPENSHIFT_INGRESS:
permissions_check.check_openshift_route()
permissions_check.check_deploy_crd()
permissions_check.check_rbac_role()
permissions_check.check_create_custom_resource()
permissions_check.check_get_custom_resource(namespace)

if self._ingress_controller == viya_constants.OPENSHIFT_INGRESS:
permissions_check.get_openshift_route_details()
permissions_check.check_openshift_route_host_port()
permissions_check.check_delete_custom_resource()
permissions_check.check_rbac_delete_role()

permissions_check.check_sample_response()
permissions_check.check_delete_crd()
permissions_check.check_delete_sample_application()
permissions_check.check_delete_sample_ingress()
if self._ingress_controller == viya_constants.OPENSHIFT_INGRESS:
permissions_check.check_delete_openshift_route()

# Check the status of deployed PVCs
permissions_check.manage_pvc(viya_constants.KUBECTL_APPLY, True)
# Delete all Deployed PVCs
Expand Down Expand Up @@ -1075,7 +1055,6 @@ def generate_report(self,
namespace_data,
cluster_admin_permission_data,
namespace_admin_permission_data,
ingress_data,
ns_admin_permission_aggregate,
cluster_admin_permission_aggregate,
output_directory=""):
Expand All @@ -1102,7 +1081,6 @@ def generate_report(self,
namespace_data=namespace_data,
cluster_admin_permission_data=cluster_admin_permission_data.items(),
namespace_admin_permission_data=namespace_admin_permission_data.items(),
ingress_data=ingress_data.items(),
namespace_admin_permission_aggregate=ns_admin_permission_aggregate['Permissions'],
cluster_admin_permission_aggregate=cluster_admin_permission_aggregate['Permissions'],
cluster_creation_info=viya_messages.CLUSTER_CREATION_INFO,
Expand Down
Loading

0 comments on commit 2096d17

Please sign in to comment.