def get_report(self):
    """ Return final report as dict

    Fills in project identification fields from settings plus the pod
    name taken from the PDF, then returns the accumulated report.
    """
    pdf = settings.getValue('pdf_file')
    self._report["project_name"] = settings.getValue("project_name")
    self._report["version"] = settings.getValue("project_version")
    self._report["build_tag"] = "none"
    self._report["pod_name"] = pdf['management_info']['resource_pool_name']
    return self._report
def load_pdf():
    """ Updates settings with PDF data

    Reads the PDF (Pod Descriptor File) from the path stored under the
    'pdf_file' setting. If the path does not exist locally it is treated
    as a GitHub URL (rewritten to its raw-content form) and fetched over
    HTTPS. The parsed JSON/YAML document replaces the path in settings.

    :raises Exception: if the remote fetch fails or the data is neither
        valid JSON nor valid YAML.
    """
    path = settings.getValue('pdf_file')
    data = ""
    if os.path.exists(path):
        with open(path) as handle:
            data = handle.read()
    else:
        # rewrite a github.com web URL to the raw-content host
        if path.find("github.com") != -1:
            path = path.replace("github.com", "raw.githubusercontent.com")
            path = path.replace("/blob", "")
        if path[:8] != "https://":
            path = "https://" + path
        try:
            resp = requests.get(path)
            if resp.status_code == requests.codes.ok:
                data = resp.text
        # bug fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to requests errors and chain the cause
        except requests.exceptions.RequestException as err:
            raise Exception(f"Invalid path: {path}") from err
    # the PDF may be JSON or YAML; try JSON first
    try:
        pdf = json.loads(data)
    except json.decoder.JSONDecodeError:
        try:
            pdf = yaml.safe_load(data)
        # bug fix: yaml.YAMLError also covers ScannerError, which the
        # previous yaml.parser.ParserError handler let escape
        except yaml.YAMLError as err:
            raise Exception(f"Invalid PDF file: {path}") from err
    settings.setValue('pdf_file', pdf)
def load_kube_api():
    """ Loads kubernetes api

    Initialises the kubernetes client from the configured kubeconfig and
    stores the resulting CoreV1Api handle in settings.
    """
    kube_config_path = settings.getValue('kube_config')
    config.load_kube_config(kube_config_path)
    settings.setValue('kube_api', client.CoreV1Api())
def helmv2_disabled_check():
    """ Checks for helm v2 support

    When the PDF declares legacy helm (v2) support
    (``vim_functional.legacy_helm_support`` contains 'YES'), verifies
    that at least one tiller pod is running in the cluster; found tiller
    pods are recorded as evidence in ``details``.

    :return: result dict (category/case_name/criteria/details), also
        persisted via ``store_result``
    """
    result = {
        'category': 'platform',
        'case_name': 'helmv2_disabled_check',
        'criteria': 'pass',
        'details': []
    }
    kube = kube_api()
    logger = logging.getLogger(__name__)
    res = False
    # scan every namespace — tiller may be deployed anywhere
    pod_details = kube.list_pod_for_all_namespaces()
    pods = pod_details.items
    version_support = settings.getValue(
        'pdf_file')['vim_functional']['legacy_helm_support']
    if 'YES' in version_support:
        for pod in pods:
            if 'tiller' in pod.metadata.name:
                res = True
                result['details'].append(pod)
        # NOTE(review): fails when helm v2 support is claimed by the PDF
        # but no tiller pod was found; when support is not claimed the
        # check passes without inspecting anything — confirm intended
        if res is False:
            result['criteria'] = 'fail'
    store_result(logger, result)
    return result
def pod_health_check():
    """ Check health of all pods and get logs of failed pods

    Walks every pod in the configured airship namespaces; a failing pod
    gets its logs attached and fails the overall check.

    :return: result dict (category/case_name/criteria/details), also
        persisted via ``store_result``
    """
    # consistency fix: every other check passes a logger to store_result;
    # this one called store_result(result) without ever creating one
    logger = logging.getLogger(__name__)
    api = kube_api()
    namespace_list = settings.getValue('airship_namespace_list')
    result = {
        'category': 'platform',
        'case_name': 'pod_health_check',
        'criteria': 'pass',
        'details': []
    }
    for namespace in namespace_list:
        pod_list = api.list_namespaced_pod(namespace)
        for pod in pod_list.items:
            pod_stats = pod_status(pod)
            if pod_stats['criteria'] == 'fail':
                # keep the evidence: attach the failing pod's logs
                pod_stats['logs'] = get_logs(pod)
                result['criteria'] = 'fail'
            result['details'].append(pod_stats)
    store_result(logger, result)
    return result
def required_cpu_allocation_ratio():
    """ Required cpu_allocation_ratio by the PDF

    :return: the PDF's vim_functional.cpu_allocation_ratio as a float
    """
    ratio = settings.getValue(
        'pdf_file')['vim_functional']['cpu_allocation_ratio']
    return float(ratio)
def cpu_manager_policy_check():
    """ Checks cpu manager settings

    Fetches each node's kubelet configz and compares its CPU-manager
    configuration against the PDF's expected values. A node fails when
    the policy type differs, or — for the 'static' policy — when the
    reconcile period or full-pcpus-only flag differ.

    :return: result dict (category/case_name/criteria/details), also
        persisted via ``store_result``
    """
    api = kube_api()
    logger = logging.getLogger(__name__)
    result = {
        'category': 'compute',
        'case_name': 'cpu_manager_policy_check',
        'criteria': 'pass',
        'details': []
    }
    # expected values come from the PDF and are invariant across nodes,
    # so read them once instead of per iteration
    cpu_manager = settings.getValue(
        'pdf_file')['vim_functional']['cpu_manager_policy']
    node_list = api.list_node()
    nodes = [item.metadata.name for item in node_list.items]
    for node in nodes:
        configz = api.connect_get_node_proxy_with_path(node, "configz")
        # configz comes back as a Python-literal string; parse safely
        configz = ast.literal_eval(configz)
        kubelet = configz['kubeletconfig']
        flag = cpu_manager['type'] == kubelet['cpuManagerPolicy']
        if flag and cpu_manager['type'] == 'static':
            # static policy additionally pins the reconcile period and
            # the full-pcpus-only flag
            flag = (cpu_manager['reconcile_period'] ==
                    kubelet['cpuManagerReconcilePeriod'] and
                    cpu_manager['full_pcpus'] ==
                    kubelet['full-pcpus-only'])
        res = {'node': node, 'criteria': 'pass', 'config': []}
        status = []
        if flag is False:
            res['criteria'] = 'fail'
            status.append(cpu_manager)
            # bug fix: previously only the LAST node's flag decided the
            # overall criteria (the check ran after the loop); now any
            # failing node fails the whole check
            result['criteria'] = 'fail'
        res['config'] = status
        result['details'].append(res)
    store_result(logger, result)
    return result
def required_os_reserved_cores():
    """ Returns value of os_reserved_cores from platform_profile
    used by Role for worker nodes in PDF

    :return: os_reserved_cores value expected by the PDF
    """
    role_name = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role_name)['os_reserved_cores']
def get_processor_profile(profile_name):
    """ Searches and returns processor_profile with `profile_name`

    :param profile_name: profile name to look up in the PDF
    :return: the matching profile dict
    :raises KeyError: if no profile with the given name exists
    """
    processor_profiles = settings.getValue('pdf_file')['processor_profiles']
    for profile in processor_profiles:
        if profile['profile_name'] == profile_name:
            # return on first match instead of scanning the whole list
            return profile
    # bug fix: an unmatched name previously raised an opaque
    # UnboundLocalError at the `return profile_details` line
    raise KeyError(f"processor profile not found: {profile_name}")
def get_role(role_name):
    """ Searches and returns role with `role_name`

    :param role_name: role name to look up in the PDF
    :return: the matching role dict
    :raises KeyError: if no role with the given name exists
    """
    roles = settings.getValue('pdf_file')['roles']
    for role in roles:
        if role['name'] == role_name:
            # return on first match instead of scanning the whole list
            return role
    # bug fix: an unmatched name previously raised an opaque
    # UnboundLocalError at the `return role_details` line
    raise KeyError(f"role not found: {role_name}")
def required_nova_scheduler_filters():
    """ Required nova scheduler_filters by the PDF

    :return: list of scheduler filter names, whitespace-stripped
    """
    pdf = settings.getValue('pdf_file')
    raw_filters = pdf['vim_functional']['scheduler_filters']
    # bug fix: the previous `map(str.strip, filters)` built a lazy map
    # object and discarded it, so surrounding whitespace was never removed
    return [name.strip() for name in raw_filters.split(',')]
def physical_network_check():
    """ physical_network_check

    Validates each physical network declared in the PDF against the
    neutron ML2 configuration: the network type must be an enabled type
    driver, and flat/vlan networks must be named in the corresponding
    ML2 section.
    """
    logger = logging.getLogger(__name__)
    ml2_config = neutron_ml2_config()
    physical_networks = settings.getValue('pdf_file')['physical_networks']
    type_drivers = ml2_config.get('ml2', 'type_drivers').split(',')
    flat_networks = ml2_config.get('ml2_type_flat',
                                   'flat_networks').split(',')
    network_vlan_ranges = ml2_config.get('ml2_type_vlan',
                                         'network_vlan_ranges').split(',')
    # vlan ranges look like "name:min:max"; only the name matters here
    vlan_networks = [entry.split(':')[0] for entry in network_vlan_ranges]
    result = {
        'category': 'network',
        'case_name': 'physical_network_check',
        'criteria': 'pass',
        'details': []
    }
    for physnet in physical_networks:
        res = {
            'network_name': physnet['name'],
            'type': physnet['type'],
            'criteria': 'fail'
        }
        if physnet['type'] in type_drivers:
            if physnet['type'] == 'flat':
                # '*' means all physical networks are allowed as flat
                if physnet['name'] in flat_networks or '*' in flat_networks:
                    res['criteria'] = 'pass'
                else:
                    res['details'] = 'physical network name not found'
            if physnet['type'] == 'vlan':
                if physnet['name'] in vlan_networks:
                    res['criteria'] = 'pass'
                else:
                    res['details'] = 'physical network name not found'
        else:
            res['details'] = 'physical network type not found'
        result['details'].append(res)
        if res['criteria'] == 'fail':
            result['criteria'] = 'fail'
    store_result(logger, result)
    return result
def pod_health_check():
    """ Check health of all pods and get logs of failed pods

    Delegates the per-pod inspection to ``checks.pod_health_check`` and
    persists the outcome.
    """
    logger = logging.getLogger(__name__)
    namespaces = settings.getValue('airship_namespace_list')
    result = checks.pod_health_check(logger, kube_api(), namespaces)
    store_result(logger, result)
    return result
def cni_plugin_check():
    """ Checks for CNI plugins and validate against PDF

    Deploys a probe daemonset ('plugin-check-test-set') and, from each of
    its pods, lists /opt/cni/bin to see which CNI binaries are installed
    on that node. Every plugin listed in the PDF under
    ``vim_functional.cnis_supported`` must be present. The daemonset is
    deleted again before returning.

    :return: result dict (category/case_name/criteria/details), also
        persisted via ``store_result``
    """
    apps_instance = client.AppsV1Api()
    api_instance = kube_api()
    result = {
        'category': 'network',
        'case_name': 'cni_plugin_check',
        'criteria': 'pass',
        'details': []
    }
    logger = logging.getLogger(__name__)
    create_daemonset(apps_instance)
    pod_details = api_instance.list_namespaced_pod('default', watch=False)
    pods = pod_details.items
    daemon_pods = []
    status = []
    # command executed inside each probe pod to list installed CNI binaries
    cmd = ['ls', '/opt/cni/bin']
    cni_plugins = settings.getValue(
        'pdf_file')['vim_functional']['cnis_supported']
    for pod in pods:
        # only the pods belonging to the probe daemonset are relevant
        if 'plugin-check-test-set' in pod.metadata.name:
            try:
                list_of_cni_from_dir = kube_exec(pod, cmd)
                for plugin in cni_plugins:
                    if plugin not in list_of_cni_from_dir:
                        result['criteria'] = 'fail'
                status.append(list_of_cni_from_dir)
                daemon_pods.append(pod.metadata.name)
            # NOTE(review): connection/runtime errors are recorded but do
            # NOT fail the check, while any other error does — confirm
            # this asymmetry is intentional
            except ConnectionError as error:
                status.append(error)
            except RuntimeError as error:
                status.append(error)
            except Exception as error:
                result['criteria'] = 'fail'
                status.append(error)
    # clean up the probe daemonset regardless of the outcome
    apps_instance.delete_namespaced_daemon_set('plugin-check-test-set',
                                               'default')
    result['details'].append(daemon_pods)
    result['details'].append(status)
    store_result(logger, result)
    return result
def rfile_save(rfile_obj, prefix='zz'):
    """ Takes rfile Object and stores it into random file
    returning filename

    Picks a random 6-letter suffix, retrying until the name is free
    under the configured results path, then writes the object's data.

    :param rfile_obj: object exposing ``get_data()``
    :param prefix: filename prefix (default 'zz')
    :return: the bare filename (without the results path)
    """
    alphabet = string.ascii_lowercase
    while True:
        tail = ''.join(random.choice(alphabet) for _ in range(6))
        name = f'{prefix}-{tail}.txt'
        full_path = settings.getValue('results_path') + name
        # keep drawing suffixes until we find an unused filename
        if not os.path.isfile(full_path):
            break
    with open(full_path, 'w') as fhandle:
        fhandle.write(rfile_obj.get_data())
    return name
def get_hardware_profile_by_role(role_name):
    """ Returns hardware profile details of a role

    :param role_name: role whose hardware profile should be resolved
    :return: the matching hardware profile dict
    :raises KeyError: if the role's hardware profile is not in the PDF
    """
    role = get_role(role_name)
    hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']
    for profile in hardware_profiles:
        if profile['profile_name'] == role['hardware_profile']:
            # return on first match instead of scanning the whole list
            return profile
    # bug fix: an unmatched profile previously raised an opaque
    # UnboundLocalError at the `return profile_details` line
    raise KeyError(
        f"hardware profile not found: {role['hardware_profile']}")
def validate(self):
    """ Validation method for kuberef

    Records start/stop timestamps around the selected test suite; only
    the 'default' suite is currently supported.
    """
    stamp = '%Y-%m-%d %H:%M:%S'
    self._report['scenario'] = 'none'
    self._report['start_date'] = dt.now().strftime(stamp)
    if settings.getValue("test_suite") == "default":
        self._report['case_name'] = 'default_kuberef'
        self.default_suite()
    self._report['stop_date'] = dt.now().strftime(stamp)
def load_settings(self):
    """ Load all required settings otherwise set to default
    Settings to load:
    * ``result_path`` (default: /tmp/local/)
    * ``results_filename`` (default: results.json)
    """
    defaults = {
        'results_path': '/tmp/local/',
        'results_filename': 'results.json',
    }
    for key, fallback in defaults.items():
        # settings raises AttributeError for unknown keys; fall back
        # to the default and write the value back either way
        try:
            value = settings.getValue(key)
        except AttributeError:
            value = fallback
        settings.setValue(key, value)
    path = settings.getValue('results_path')
    if not os.path.exists(path):
        os.makedirs(path)
    self._path = path
    self._filename = path + settings.getValue('results_filename')
def load_pdf():
    """ Updates settings with PDF data

    Reads the PDF (Pod Descriptor File) from the local path stored under
    the 'pdf_file' setting, parses it as JSON or YAML, and replaces the
    path in settings with the parsed document.

    :raises Exception: if the file is neither valid JSON nor valid YAML.
    """
    filename = settings.getValue('pdf_file')
    with open(filename) as handle:
        data = handle.read()
    # the PDF may be JSON or YAML; try JSON first
    try:
        pdf = json.loads(data)
    except json.decoder.JSONDecodeError:
        try:
            pdf = yaml.safe_load(data)
        # bug fix: yaml.YAMLError also covers ScannerError, which the
        # previous yaml.parser.ParserError handler let escape
        except yaml.YAMLError as err:
            # bug fix: the message was an f-string with no placeholder
            # that hard-coded "(unknown)"; report the actual filename
            # and chain the original parse error
            raise Exception(f"Invalid PDF file: {filename}") from err
    settings.setValue('pdf_file', pdf)
def validate(self):
    """ Validation method

    Records start/stop timestamps around the selected test suite and
    removes the helper curl pod afterwards; only the 'default' suite is
    currently supported.
    """
    stamp = '%Y-%m-%d %H:%M:%S'
    self._report['scenario'] = 'none'
    self._report['start_date'] = dt.now().strftime(stamp)
    if settings.getValue("test_suite") == "default":
        self._report['case_name'] = 'ook_airship'
        self.default_suite()
    delete_kube_curl_pod()
    self._report['stop_date'] = dt.now().strftime(stamp)
def liveness_probe_check():
    """ Checks whether the liveness probe is configured for all
    overcloud components deployed as pods on undercloud Kubernetes.

    Every container of every pod in the configured airship namespaces
    must define a liveness probe; any container without one fails both
    its pod's entry and the overall check.
    """
    logger = logging.getLogger(__name__)
    api = kube_api()
    result = {
        'category': 'platform',
        'case_name': 'liveness_probe_check',
        'criteria': 'pass',
        'details': []
    }
    for namespace in settings.getValue('airship_namespace_list'):
        for pod in api.list_namespaced_pod(namespace).items:
            pod_stats = {
                'criteria': 'pass',
                'name': pod.metadata.name,
                'namespace': pod.metadata.namespace,
                'node': pod.spec.node_name,
                'containers': []
            }
            for container in pod.spec.containers:
                # getattr default covers both a missing attribute and an
                # explicit None, same as the hasattr/None pair it replaces
                probe = getattr(container, 'liveness_probe', None)
                if probe is None:
                    result['criteria'] = 'fail'
                    pod_stats['criteria'] = 'fail'
                pod_stats['containers'].append({
                    'name': container.name,
                    'liveness_probe': probe
                })
            result['details'].append(pod_stats)
    store_result(logger, result)
    return result
def trace_os_reserved_cores():
    """ Trace os_reserved_cores from Airship deployment

    os_reserved_cores = all_cores - (reserved_vnf_cores +
                                     vswitch_pmd_cores +
                                     vswitch_dpdk_lcores)

    :return: comma separated string of the reserved core ids
    """
    worker_role = settings.getValue('WORKER_ROLE_NAME')
    all_cores = get_cores_by_role(worker_role)
    # collect every core that is spoken for by VNFs or the vswitch
    non_os_cores = []
    for traced in (trace_reserved_vnf_cores(),
                   trace_vswitch_pmd_cores(),
                   trace_vswitch_dpdk_lcores()):
        non_os_cores.extend(convert_range_to_list(traced))
    os_reserved_cores = set(all_cores).difference(non_os_cores)
    # bug fix: iterating the set directly made the output order
    # nondeterministic; sort for a stable, readable core list
    return ','.join(map(str, sorted(os_reserved_cores)))
def kube_api():
    """ Returns kube_api object

    Retrieves the CoreV1Api handle previously stored in settings by
    ``load_kube_api``.
    """
    api = settings.getValue('kube_api')
    return api