def multi_interface_cni_check():
    """
    Checks if a multi-interface CNI (e.g. multus) is enabled
    """
    apps_instance = client.AppsV1Api()
    api_instance = kube_api()
    logger = logging.getLogger(__name__)
    result = {'category': 'network',
              'case_name': 'multi_interface_cni_check',
              'criteria': 'pass',
              'details': []
             }
    create_daemonset(apps_instance)
    pod_details = api_instance.list_namespaced_pod('default', watch=False)
    pods = pod_details.items
    status = []
    cmd = ['ls', '/etc/cni/net.d']

    for pod in pods:
        if 'plugin-check-test-set' in pod.metadata.name:
            try:
                # list the CNI config files on the node, then read the first
                # one and check whether it configures the multus meta-plugin
                list_of_plugin_conf = kube_exec(pod, cmd)
                list_of_plugin_conf = list_of_plugin_conf.split("\n")

                cmd3 = ['cat', "/etc/cni/net.d/" + list_of_plugin_conf[0]]
                multi_interface_conf = kube_exec(pod, cmd3)

                if 'multus' not in multi_interface_conf:
                    result['criteria'] = 'fail'

                status.append(list_of_plugin_conf)
                status.append(multi_interface_conf)
            except ConnectionError as error:
                status.append(error)
            except RuntimeError as error:
                status.append(error)
            except Exception as error:
                result['criteria'] = 'fail'
                status.append(error)

    apps_instance.delete_namespaced_daemon_set('plugin-check-test-set', 'default')
    result['details'].append(status)
    store_result(logger, result)
    return result

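
# multi_interface_cni_check and cni_plugin_check both depend on a
# create_daemonset helper defined elsewhere in this module. The sketch below
# (given a distinct name so it does not shadow the real helper) illustrates one
# plausible shape for it: a DaemonSet named 'plugin-check-test-set' whose pods
# mount the host CNI directories so the checks can exec into them. The image,
# labels, mount list and sleep duration are assumptions; the actual manifest
# used by the deployment may differ.
def create_daemonset_sketch(apps_instance):
    """ Illustrative sketch only -- the real create_daemonset may differ """
    manifest = {
        'apiVersion': 'apps/v1',
        'kind': 'DaemonSet',
        'metadata': {'name': 'plugin-check-test-set'},
        'spec': {
            'selector': {'matchLabels': {'app': 'plugin-check'}},
            'template': {
                'metadata': {'labels': {'app': 'plugin-check'}},
                'spec': {
                    'containers': [{
                        'name': 'plugin-check',
                        'image': 'busybox:1.31',  # assumed image
                        'command': ['sh', '-c', 'sleep 3600'],
                        'volumeMounts': [
                            {'name': 'cni-conf', 'mountPath': '/etc/cni/net.d'},
                            {'name': 'cni-bin', 'mountPath': '/opt/cni/bin'}
                        ]
                    }],
                    'volumes': [
                        {'name': 'cni-conf',
                         'hostPath': {'path': '/etc/cni/net.d'}},
                        {'name': 'cni-bin',
                         'hostPath': {'path': '/opt/cni/bin'}}
                    ]
                }
            }
        }
    }
    apps_instance.create_namespaced_daemon_set('default', manifest)
    time.sleep(8)  # give the pods a moment to schedule before exec'ing into them
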
def get_neutron_ml2_conf_from_pod(pod):
    """
    Reads ml2 config from neutron pod
    """
    cmd = ['ls', '/etc/neutron/plugins/ml2/']
    response = kube_exec(pod, cmd)
    files = response.rstrip("\n").split()

    response = []
    for filename in files:
        cmd = ['cat', '/etc/neutron/plugins/ml2/' + filename]
        conf = kube_exec(pod, cmd)
        response.append(conf)
    return response

def ceph_health_check():
    """
    Check health of Ceph
    """
    pod = get_pod_with_labels('application=ceph,component=mon')
    cmd = ['ceph', 'health', '-f', 'json']
    response = kube_exec(pod, cmd)
    response = ast.literal_eval(response)

    result = {'category': 'storage',
              'case_name': 'ceph_health_check',
              'details': []
             }

    if response['status'] == 'HEALTH_OK':
        result['criteria'] = 'pass'
        result['details'] = 'HEALTH_OK'
    else:
        result['criteria'] = 'fail'
        result['details'] = response

    store_result(result)
    return result

def trace_vswitch_dpdk_lcores():
    """
    Trace vswitch_dpdk_lcores from Airship deployment

    :return: value traced from `other_config:dpdk-lcore-mask` in
             openvswitchdb using ovs-vsctl
    """
    ovs_pod = get_pod_with_labels(
        'application=openvswitch,component=openvswitch-vswitchd')

    cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
    response = kube_exec(ovs_pod, cmd)

    # convert config str to json str
    match = re.findall("[a-zA-Z0-9-]+=", response)
    for key in match:
        response = response.replace(key, '"' + key[:-1] + '":')
    match = re.findall(":[a-zA-Z0-9-]+", response)
    for key in match:
        response = response.replace(key[1:], '"' + key[1:] + '"')

    config = json.loads(response)

    if 'dpdk-lcore-mask' in config:
        pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask'])
    else:
        pmd_cores = ''

    return pmd_cores

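
# trace_vswitch_dpdk_lcores above relies on a hex_to_comma_list helper defined
# elsewhere. The sketch below (given a distinct name so it does not shadow the
# real helper) shows the expected conversion: a hex CPU mask such as '0x3' is
# expanded into the comma-separated core list '0,1'. The real helper may
# collapse consecutive cores into ranges or format the list differently.
def hex_to_comma_list_sketch(hex_mask):
    """ Illustrative sketch only -- the real hex_to_comma_list may differ """
    mask = int(hex_mask, 16)
    cores = [str(bit) for bit in range(mask.bit_length()) if mask >> bit & 1]
    return ','.join(cores)
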
def cni_plugin_check():
    """
    Checks for CNI plugins and validates them against the PDF
    """
    apps_instance = client.AppsV1Api()
    api_instance = kube_api()
    result = {'category': 'network',
              'case_name': 'cni_plugin_check',
              'criteria': 'pass',
              'details': []
             }
    logger = logging.getLogger(__name__)
    create_daemonset(apps_instance)
    pod_details = api_instance.list_namespaced_pod('default', watch=False)
    pods = pod_details.items
    daemon_pods = []
    status = []
    cmd = ['ls', '/opt/cni/bin']
    cni_plugins = settings.getValue(
        'pdf_file')['vim_functional']['cnis_supported']

    for pod in pods:
        if 'plugin-check-test-set' in pod.metadata.name:
            try:
                # compare the plugins installed on the node against the list
                # of supported CNIs declared in the PDF
                list_of_cni_from_dir = kube_exec(pod, cmd)
                for plugin in cni_plugins:
                    if plugin not in list_of_cni_from_dir:
                        result['criteria'] = 'fail'
                status.append(list_of_cni_from_dir)
                daemon_pods.append(pod.metadata.name)
            except ConnectionError as error:
                status.append(error)
            except RuntimeError as error:
                status.append(error)
            except Exception as error:
                result['criteria'] = 'fail'
                status.append(error)

    apps_instance.delete_namespaced_daemon_set('plugin-check-test-set', 'default')
    result['details'].append(daemon_pods)
    result['details'].append(status)
    store_result(logger, result)
    return result

def get_nova_conf():
    """
    Returns parsed nova.conf
    """
    pod = get_pod_with_labels('application=nova,component=compute')
    cmd = ['cat', '/etc/nova/nova.conf']
    response = kube_exec(pod, cmd)
    config = configparser.ConfigParser()
    config.read_string(response)
    return config

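
# get_nova_conf returns a standard configparser.ConfigParser object, so callers
# can look up options with the usual configparser API. The function below is an
# illustrative usage sketch only; the section and option names it reads (e.g.
# [libvirt] cpu_mode) are assumptions about what a caller might want, not
# values this module requires.
def example_read_nova_options():
    """ Illustrative usage sketch for get_nova_conf (names are assumptions) """
    config = get_nova_conf()
    cpu_mode = config.get('libvirt', 'cpu_mode', fallback='')
    dedicated_set = config.get('compute', 'cpu_dedicated_set', fallback='')
    return cpu_mode, dedicated_set
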
def trace_isolated_cores():
    """
    Trace isolated_cores from Airship deployment

    :return: value traced from `isolcpus` key in `/proc/cmdline`
    """
    pod = get_pod_with_labels('application=nova,component=compute')
    cmd = ['cat', '/proc/cmdline']
    proc_cmd = kube_exec(pod, cmd)

    isolcpus_value = ''  # default when isolcpus is not set on the kernel cmdline
    for option in proc_cmd.split():
        if 'isolcpus' in option:
            _, isolcpus_value = split_key_value(option)
            break
    return isolcpus_value

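
# trace_isolated_cores above uses a split_key_value helper defined elsewhere.
# The sketch below (given a distinct name so it does not shadow the real
# helper) shows the expected behaviour: splitting a kernel cmdline option such
# as 'isolcpus=2-7' into the pair ('isolcpus', '2-7'). The real helper may
# handle delimiters differently.
def split_key_value_sketch(option, delimiter='='):
    """ Illustrative sketch only -- the real split_key_value may differ """
    key, _, value = option.partition(delimiter)
    return key, value
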
def privilege_check():
    """
    Checks whether privileged pods can be created
    """
    kube = kube_api()
    logger = logging.getLogger(__name__)

    pod_manifest = {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {
            'name': 'security-privileges-demo',
        },
        'spec': {
            'containers': [{
                'image': 'alpine:3.2',
                'name': 'security-privileges-demo',
                'command': ["/bin/sh", "-c", "sleep 60m"],
                'securityContext': {
                    'privileged': True
                }
            }]
        }
    }

    result = {'category': 'platform',
              'case_name': 'privilege_check',
              'criteria': 'pass',
              'details': []
             }
    status = []
    try:
        pod_priv = kube.create_namespaced_pod(body=pod_manifest, namespace='default')
        time.sleep(5)
        cmd = ['ps', 'aux']
        response = kube_exec(pod_priv, cmd)
        if "root" in response:
            result['criteria'] = 'fail'
            status.append(response)
        kube.delete_namespaced_pod(name=pod_priv.metadata.name, namespace='default')
    except KeyError as error:
        status.append(error)
    except RuntimeError as error:
        status.append(error)
    except Exception as error:
        kube.delete_namespaced_pod(name=pod_priv.metadata.name, namespace='default')
        result['criteria'] = 'fail'
        status.append(error)

    result['details'].append(status)
    store_result(logger, result)
    return result

def capability_check():
    """
    Checks whether pods can be created with particular added capabilities
    """
    kube = kube_api()
    logger = logging.getLogger(__name__)

    pod_manifest = {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {
            'name': 'security-capability-demo',
        },
        'spec': {
            'containers': [{
                'image': 'alpine:3.2',
                'name': 'security-capability-demo',
                'command': ["/bin/sh", "-c", "sleep 60m"],
                'securityContext': {
                    'capabilities': {
                        'drop': ["ALL"],
                        'add': ['NET_ADMIN', 'NET_RAW']
                    }
                }
            }]
        }
    }

    result = {'category': 'platform',
              'case_name': 'capability_check',
              'criteria': 'pass',
              'details': []
             }
    status = []
    try:
        pod_cap = kube.create_namespaced_pod(body=pod_manifest, namespace='default')
        time.sleep(6)
        cmd = ['cat', '/proc/1/status']
        response = kube_exec(pod_cap, cmd)
        # a CapEff value of 0000000000003000 means the container was granted
        # exactly NET_ADMIN and NET_RAW
        if "0000000000003000" in response:
            result['criteria'] = 'fail'
            status.append(pod_cap)
        kube.delete_namespaced_pod(name=pod_cap.metadata.name, namespace='default')
    except KeyError as error:
        status.append(error)
    except RuntimeError as error:
        status.append(error)
    except Exception as error:
        kube.delete_namespaced_pod(name=pod_cap.metadata.name, namespace='default')
        result['criteria'] = 'fail'
        status.append(error)

    result['details'].append(status)
    store_result(logger, result)
    return result

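
# The magic string checked in capability_check comes from the kernel capability
# bit positions: CAP_NET_ADMIN is bit 12 and CAP_NET_RAW is bit 13, so a
# container whose effective set contains exactly those two capabilities reports
# CapEff: 0000000000003000 in /proc/1/status. The expression below is an
# illustrative derivation of that constant, not code the check itself uses.
#
#     format((1 << 12) | (1 << 13), '016x')  # -> '0000000000003000'
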