Example #1
    def check_es_cluster_health(self, pods_by_name):
        """Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
        errors = []
        for pod_name in pods_by_name.keys():
            cluster_health_cmd = self._build_es_curl_cmd(
                pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
            cluster_health_data = self.exec_oc(
                cluster_health_cmd, [], save_as_name='get_es_health.json')
            try:
                health_res = json.loads(cluster_health_data)
                if not health_res or not health_res.get('status'):
                    raise ValueError()
            except ValueError:
                errors.append(
                    OpenShiftCheckException(
                        'BadEsResponse',
                        'Could not retrieve cluster health status from logging ES pod "{pod}".\n'
                        'Response was:\n{output}'.format(
                            pod=pod_name, output=cluster_health_data)))
                continue

            if health_res['status'] not in ['green', 'yellow']:
                errors.append(
                    OpenShiftCheckException(
                        'EsClusterHealthRed',
                        'Elasticsearch cluster health status is RED according to pod "{}"'
                        .format(pod_name)))

        return errors
Example #2
    def curl_kibana_with_uuid(self, kibana_pod):
        """curl Kibana with a unique uuid."""
        uuid = self.generate_uuid()
        pod_name = kibana_pod["metadata"]["name"]
        exec_cmd = "exec {pod_name} -c kibana -- curl --max-time 30 -s http://localhost:5601/{uuid}"
        exec_cmd = exec_cmd.format(pod_name=pod_name, uuid=uuid)

        error_str = self.exec_oc(exec_cmd, [])

        try:
            error_code = json.loads(error_str)["statusCode"]
        except (KeyError, ValueError):
            raise OpenShiftCheckException(
                'kibanaInvalidResponse',
                'invalid response returned from Kibana request:\n'
                'Command: {}\nResponse: {}'.format(exec_cmd, error_str)
            )

        if error_code != 404:
            raise OpenShiftCheckException(
                'kibanaInvalidReturnCode',
                'invalid error code returned from Kibana request.\n'
                'Expecting error code "404", but got "{}" instead.'.format(error_code)
            )

        return uuid
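The check above treats a 404 from Kibana as the expected outcome: requesting a random UUID path should produce a JSON error body whose "statusCode" is 404. A minimal sketch of that parsing, assuming a typical Kibana not-found body (illustrative, not captured output):

import json

# Hypothetical response for an unknown path such as /<uuid> (assumed shape).
sample_response = '{"statusCode": 404, "error": "Not Found", "message": "Not Found"}'

error_code = json.loads(sample_response)["statusCode"]
assert error_code == 404  # any other code would raise kibanaInvalidReturnCode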
Example #3
    def exec_diagnostic(self, diagnostic):
        """
        Execute an 'oc adm diagnostics' command on the remote host.
        Raises OcNotFound or registers OcDiagFailed.
        Returns True on success or False on failure (non-zero rc).
        """
        config_base = self.get_var("openshift.common.config_base")
        args = {
            "config_file": os.path.join(config_base, "master",
                                        "admin.kubeconfig"),
            "cmd": "adm diagnostics",
            "extra_args": [diagnostic],
        }

        result = self.execute_module("ocutil",
                                     args,
                                     save_as_name=diagnostic + ".failure.json")
        self.register_file(diagnostic + ".txt", result['result'])
        if result.get("failed"):
            if result['result'] == '[Errno 2] No such file or directory':
                raise OpenShiftCheckException(
                    "OcNotFound",
                    "This host is supposed to be a master but does not have the `oc` command where expected.\n"
                    "Has an installation been run on this host yet?")

            self.register_failure(
                OpenShiftCheckException(
                    'OcDiagFailed',
                    'The {diag} diagnostic reported an error:\n'
                    '{error}'.format(diag=diagnostic, error=result['result'])))
            return False
        return True
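Example #4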
    def get_vg_free(self, pool):
        """Determine which VG to examine according to the pool name. Return: size vgs reports.
        Pool name is the only indicator currently available from the Docker API driver info.
        We assume a name that looks like "vg--name-docker--pool";
        vg and lv names with inner hyphens doubled, joined by a hyphen.
        """
        match = re.match(r'((?:[^-]|--)+)-(?!-)', pool)  # matches up to the first single hyphen
        if not match:  # unlikely, but... be clear if we assumed wrong
            raise OpenShiftCheckException(
                "This host's Docker reports it is using a storage pool named '{}'.\n"
                "However this name does not have the expected format of 'vgname-lvname'\n"
                "so the available storage in the VG cannot be determined.".format(pool)
            )
        vg_name = match.groups()[0].replace("--", "-")
        vgs_cmd = "/sbin/vgs --noheadings -o vg_free --units g --select vg_name=" + vg_name
        # should return free space like "  12.00g" if the VG exists; empty if it does not

        ret = self.execute_module("command", {"_raw_params": vgs_cmd})
        if ret.get("failed") or ret.get("rc", 0) != 0:
            raise OpenShiftCheckException(
                "Is LVM installed? Failed to run /sbin/vgs "
                "to determine docker storage usage:\n" + ret.get("msg", "")
            )
        size = ret.get("stdout", "").strip()
        if not size:
            raise OpenShiftCheckException(
                "This host's Docker reports it is using a storage pool named '{pool}'.\n"
                "which we expect to come from local VG '{vg}'.\n"
                "However, /sbin/vgs did not find this VG. Is Docker for this host"
                "running and using the storage on the host?".format(pool=pool, vg=vg_name)
            )
        return size
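To illustrate the pool-name parsing above, here is a standalone sketch using the hypothetical pool name from the docstring ("vg--name-docker--pool", i.e. VG "vg-name" and LV "docker-pool"):

import re

pool = "vg--name-docker--pool"  # hypothetical pool name for illustration
match = re.match(r'((?:[^-]|--)+)-(?!-)', pool)  # same pattern as above
vg_name = match.group(1).replace("--", "-")
print(vg_name)  # -> "vg-name"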
Example #5
    def curl_kibana_with_uuid(self, kibana_pod):
        """curl Kibana with a unique uuid."""
        uuid = self.generate_uuid()
        pod_name = kibana_pod["metadata"]["name"]
        exec_cmd = "exec {pod_name} -c kibana -- curl --max-time 30 -s http://localhost:5601/{uuid}"
        exec_cmd = exec_cmd.format(pod_name=pod_name, uuid=uuid)

        error_str = self.exec_oc(self.logging_namespace, exec_cmd, [])

        try:
            error_code = json.loads(error_str)["statusCode"]
        except KeyError:
            msg = ('invalid response returned from Kibana request (Missing "statusCode" key):\n'
                   'Command: {}\nResponse: {}').format(exec_cmd, error_str)
            raise OpenShiftCheckException(msg)
        except ValueError:
            msg = ('invalid response returned from Kibana request (Non-JSON output):\n'
                   'Command: {}\nResponse: {}').format(exec_cmd, error_str)
            raise OpenShiftCheckException(msg)

        if error_code != 404:
            msg = 'invalid error code returned from Kibana request. Expecting error code "404", but got "{}" instead.'
            raise OpenShiftCheckException(msg.format(error_code))

        return uuid
Example #6
    def check_elasticsearch_node_list(self, pods_by_name):
        """Check that reported ES masters are accounted for by pods. Returns: list of errors"""

        if not pods_by_name:
            return [OpenShiftCheckException(
                'MissingComponentPods',
                'No logging Elasticsearch pods were found.'
            )]

        # get ES cluster nodes
        node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
        cluster_node_data = self.exec_oc(node_cmd, [], save_as_name="get_es_nodes.json")
        try:
            cluster_nodes = json.loads(cluster_node_data)['nodes']
        except (ValueError, KeyError):
            return [OpenShiftCheckException(
                'MissingNodeList',
                'Failed to query Elasticsearch for the list of ES nodes. The output was:\n' +
                cluster_node_data
            )]

        # Try to match all ES-reported node hosts to known pods.
        errors = []
        for node in cluster_nodes.values():
            # Note that with 1.4/3.4 the pod IP may be used as the master name
            if not any(node['host'] in (pod_name, pod['status'].get('podIP'))
                       for pod_name, pod in pods_by_name.items()):
                errors.append(OpenShiftCheckException(
                    'EsPodNodeMismatch',
                    'The Elasticsearch cluster reports a member node "{node}"\n'
                    'that does not correspond to any known ES pod.'.format(node=node['host'])
                ))

        return errors
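The node-list check only relies on the "nodes" map and each node's "host" field, matched against pod names and pod IPs. A minimal sketch of that matching with hand-written data (the node ID, host name, and IP are made up):

import json

cluster_node_data = '{"nodes": {"abc123": {"host": "logging-es-data-master-0"}}}'
pods_by_name = {"logging-es-data-master-0": {"status": {"podIP": "10.128.0.5"}}}

cluster_nodes = json.loads(cluster_node_data)["nodes"]
for node in cluster_nodes.values():
    matched = any(node["host"] in (pod_name, pod["status"].get("podIP"))
                  for pod_name, pod in pods_by_name.items())
    print(node["host"], "matched" if matched else "unmatched")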
Example #7
    def run(self):
        """Add log entry by making unique request to Kibana. Check for unique entry in the ElasticSearch pod logs."""
        try:
            log_index_timeout = int(
                self.get_var("openshift_check_logging_index_timeout_seconds", default=ES_CMD_TIMEOUT_SECONDS)
            )
        except ValueError:
            raise OpenShiftCheckException(
                'InvalidTimeout',
                'Invalid value provided for "openshift_check_logging_index_timeout_seconds". '
                'Value must be an integer representing an amount in seconds.'
            )

        running_component_pods = dict()

        # get all component pods
        for component, name in (['kibana', 'Kibana'], ['es', 'Elasticsearch']):
            pods = self.get_pods_for_component(component)
            running_pods = self.running_pods(pods)

            if not running_pods:
                raise OpenShiftCheckException(
                    component + 'NoRunningPods',
                    'No {} pods in the "Running" state were found. '
                    'At least one pod is required in order to perform this check.'.format(name)
                )

            running_component_pods[component] = running_pods

        uuid = self.curl_kibana_with_uuid(running_component_pods["kibana"][0])
        self.wait_until_cmd_or_err(running_component_pods["es"][0], uuid, log_index_timeout)
        return {}
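Example #8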
    def check_fluentd_env_var(self):
        """Read and return the value of the 'USE_JOURNAL' environment variable on a fluentd pod."""
        running_pods = self.running_fluentd_pods()

        try:
            pod_containers = running_pods[0]["spec"]["containers"]
        except KeyError:
            return "Unable to detect running containers on selected Fluentd pod."

        if not pod_containers:
            msg = ('There are no running containers on selected Fluentd pod "{}".\n'
                   'Unable to calculate expected logging driver.').format(running_pods[0]["metadata"].get("name", ""))
            raise OpenShiftCheckException(msg)

        pod_env = pod_containers[0].get("env")
        if not pod_env:
            msg = ('There are no environment variables set on the Fluentd container "{}".\n'
                   'Unable to calculate expected logging driver.').format(pod_containers[0].get("name"))
            raise OpenShiftCheckException(msg)

        for env in pod_env:
            if env["name"] == "USE_JOURNAL":
                return env.get("value", "false") != "false"

        return False
Example #9
    def check_elasticsearch_diskspace(self, pods_by_name):
        """
        Exec into an ES pod and query the diskspace on the persistent volume.
        Returns: list of errors
        """
        errors = []
        for pod_name in pods_by_name.keys():
            df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(
                pod_name)
            disk_output = self.exec_oc(df_cmd, [],
                                       save_as_name='get_pv_diskspace.json')
            lines = disk_output.splitlines()
            # expecting one header looking like 'IUse% Use%' and one body line
            body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
            if len(lines) != 2 or len(lines[0].split()) != 2 or not re.match(
                    body_re, lines[1]):
                errors.append(
                    OpenShiftCheckException(
                        'BadDfResponse',
                        'Could not retrieve storage usage from logging ES pod "{pod}".\n'
                        'Response to `df` command was:\n{output}'.format(
                            pod=pod_name, output=disk_output)))
                continue
            inode_pct, disk_pct = re.match(body_re, lines[1]).groups()

            inode_pct_thresh = self.get_var('openshift_check_efk_es_inode_pct',
                                            default='90')
            if int(inode_pct) >= int(inode_pct_thresh):
                errors.append(
                    OpenShiftCheckException(
                        'InodeUsageTooHigh',
                        'Inode percent usage on the storage volume for logging ES pod "{pod}"\n'
                        '  is {pct}, greater than threshold {limit}.\n'
                        '  Note: threshold can be specified in inventory with {param}'
                        .format(
                            pod=pod_name,
                            pct=str(inode_pct),
                            limit=str(inode_pct_thresh),
                            param='openshift_check_efk_es_inode_pct',
                        )))
            disk_pct_thresh = self.get_var(
                'openshift_check_efk_es_storage_pct', default='80')
            if int(disk_pct) >= int(disk_pct_thresh):
                errors.append(
                    OpenShiftCheckException(
                        'DiskUsageTooHigh',
                        'Disk percent usage on the storage volume for logging ES pod "{pod}"\n'
                        '  is {pct}, greater than threshold {limit}.\n'
                        '  Note: threshold can be specified in inventory with {param}'
                        .format(
                            pod=pod_name,
                            pct=str(disk_pct),
                            limit=str(disk_pct_thresh),
                            param='openshift_check_efk_es_storage_pct',
                        )))

        return errors
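To make the expected `df` response concrete, here is the parsing step above applied to a hand-written two-line output (the percentages are made up):

import re

disk_output = "IUse% Use%\n   1%   23%"  # hypothetical `df --output=ipcent,pcent` output
lines = disk_output.splitlines()
body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'

inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
print(inode_pct, disk_pct)  # -> 1 23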
Example #10
    def exec_oc(execute_module=None, namespace="logging", cmd_str="", extra_args=None, task_vars=None):
        """
        Execute an 'oc' command in the remote host.
        Returns: output of command and namespace,
        or raises OpenShiftCheckException on error
        """
        config_base = get_var(task_vars, "openshift", "common", "config_base")
        args = {
            "namespace": namespace,
            "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
            "cmd": cmd_str,
            "extra_args": list(extra_args) if extra_args else [],
        }

        result = execute_module("ocutil", args, task_vars)
        if result.get("failed"):
            msg = (
                'Unexpected error using `oc` to validate the logging stack components.\n'
                'Error executing `oc {cmd}`:\n'
                '{error}'
            ).format(cmd=args['cmd'], error=result['result'])

            if result['result'] == '[Errno 2] No such file or directory':
                msg = (
                    "This host is supposed to be a master but does not have the `oc` command where expected.\n"
                    "Has an installation been run on this host yet?"
                )
            raise OpenShiftCheckException(msg)

        return result.get("result", "")
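Example #11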
    def run(self):
        # attempt to get the docker info hash from the docker cli
        command = ' '.join(['docker', 'info', '--format', '"{{json .}}"'])
        command_args = dict(_raw_params=command)
        command_result = self.execute_module('command', command_args)
        if command_result.get('rc', 0) != 0 or command_result.get('failed'):
            raise OpenShiftCheckException(
                'RemoteCommandFailure',
                'Failed to execute command on remote host: %s' % command)
        docker_info = json.loads(command_result["stdout"])

        # check if the storage driver we saw is valid
        driver = docker_info.get("Driver", "[NONE]")
        if driver not in self.storage_drivers:
            msg = (
                "Detected unsupported Docker storage driver '{driver}'.\n"
                "Supported storage drivers are: {drivers}"
            ).format(driver=driver, drivers=', '.join(self.storage_drivers))
            return {"failed": True, "msg": msg}

        # driver status info is a list of tuples; convert to dict and validate based on driver
        driver_status = {item[0]: item[1] for item in docker_info.get("DriverStatus", [])}

        result = {}

        if driver == "devicemapper":
            result = self.check_devicemapper_support(driver_status)

        if driver in ['overlay', 'overlay2']:
            result = self.check_overlay_support(docker_info, driver_status)

        return result
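The "DriverStatus" field of the `docker info --format "{{json .}}"` output is a list of two-element entries, which the dict comprehension above turns into a lookup table. A small illustration with made-up values:

# Hypothetical excerpt of the parsed `docker info` JSON (values are made up).
docker_info = {
    "Driver": "devicemapper",
    "DriverStatus": [["Pool Name", "docker-pool"], ["Data Space Available", "10 GB"]],
}

driver_status = {item[0]: item[1] for item in docker_info.get("DriverStatus", [])}
print(driver_status["Pool Name"])  # -> docker-pool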
Example #12
    def read_command_output(self, command, utf8=True):
        """Execute the provided command using the command module
        and return its output.

        If the command is a string, use a shell.  Otherwise, assume the command
        is a list, join it with spaces, and execute it without shell.
        """
        uses_shell = False
        if isinstance(command, six.string_types):
            uses_shell = True
        else:
            command = ' '.join(command)

        command_args = dict(_raw_params=command, _uses_shell=uses_shell)
        # Use self._execute_module instead of self.execute_module because
        # the latter sets self.changed.
        result = self._execute_module('command', command_args)
        if result.get('rc', 0) != 0 or result.get('failed'):
            raise OpenShiftCheckException(
                'RemoteCommandFailure',
                'Failed to execute command on remote host: %s' % command)

        if utf8:
            return result['stdout'].encode('utf-8')
        return result['stdout']
Example #13
    def query_es_from_es(self, es_pod, uuid):
        """curl the Elasticsearch pod and look for a unique uuid in its logs."""
        pod_name = es_pod["metadata"]["name"]
        exec_cmd = (
            "exec {pod_name} -- curl --max-time 30 -s -f "
            "--cacert /etc/elasticsearch/secret/admin-ca "
            "--cert /etc/elasticsearch/secret/admin-cert "
            "--key /etc/elasticsearch/secret/admin-key "
            "https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
        )
        exec_cmd = exec_cmd.format(pod_name=pod_name,
                                   namespace=self.logging_namespace(),
                                   uuid=uuid)
        result = self.exec_oc(exec_cmd, [])

        try:
            count = json.loads(result)["count"]
        except (KeyError, ValueError):
            raise OpenShiftCheckException(
                'esInvalidResponse',
                'Invalid response from Elasticsearch query:\n'
                '  {}\n'
                'Response was:\n{}'.format(exec_cmd, result))

        return count
Example #14
    def get_resource(self, kind):
        """Return a list of all resources of the specified kind."""
        for resource in self.task_vars['resources']['results']:
            if resource['item'] == kind:
                return resource['results']['results'][0]['items']

        raise OpenShiftCheckException('CouldNotListResource',
                                      'Could not list resource %s' % kind)
Example #15
    def wait_until_cmd_or_err(self, es_pod, uuid, timeout_secs):
        """Retry an Elasticsearch query every second until query success, or a defined
        length of time has passed."""
        deadline = time.time() + timeout_secs
        interval = 1
        while not self.query_es_from_es(es_pod, uuid):
            if time.time() + interval > deadline:
                msg = "expecting match in Elasticsearch for message with uuid {}, but no matches were found after {}s."
                raise OpenShiftCheckException(msg.format(uuid, timeout_secs))
            time.sleep(interval)
Example #16
    def parse_version(self, version):
        components = version.split(".")
        if not components or len(components) < 2:
            msg = "An invalid version of OpenShift was found for this host: {}"
            raise OpenShiftCheckException(msg.format(version))

        if components[0] in self.openshift_major_release_version:
            components[0] = self.openshift_major_release_version[components[0]]

        return '.'.join(components[:2])
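Example #17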
    def _etcd_mount_info(self):
        ansible_mounts = self.get_var("ansible_mounts")
        mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts}

        for path in self.supported_mount_paths:
            if path in mounts:
                return mounts[path]

        paths = ', '.join(sorted(mounts)) or 'none'
        msg = "Unable to find etcd storage mount point. Paths mounted: {}.".format(paths)
        raise OpenShiftCheckException(msg)
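Example #18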
    def get_openshift_version_tuple(self):
        """Return received image tag as a normalized (X, Y) minor version tuple."""
        version = self.get_var("openshift_image_tag")
        comps = [int(component) for component in re.findall(r'\d+', version)]

        if len(comps) < 2:
            msg = "An invalid version of OpenShift was found for this host: {}"
            raise OpenShiftCheckException(msg.format(version))

        comps[0] = self.map_major_release_version.get(comps[0], comps[0])
        return tuple(comps[0:2])
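As a worked example of the normalization above (the image tag and the major-release map are illustrative assumptions, not values taken from the source):

import re

openshift_image_tag = "v3.6.173.0.5"   # hypothetical image tag
map_major_release_version = {1: 3}     # assumed mapping, e.g. origin 1.x -> 3.x

comps = [int(c) for c in re.findall(r'\d+', openshift_image_tag)]  # [3, 6, 173, 0, 5]
comps[0] = map_major_release_version.get(comps[0], comps[0])
print(tuple(comps[0:2]))  # -> (3, 6)

Example #19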
    def _get_etcd_mountpath(ansible_mounts):
        valid_etcd_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]

        mount_for_path = {mnt.get("mount"): mnt for mnt in ansible_mounts}
        for path in valid_etcd_mount_paths:
            if path in mount_for_path:
                return mount_for_path[path]

        paths = ', '.join(sorted(mount_for_path)) or 'none'
        msg = "Unable to determine a valid etcd mountpath. Paths mounted: {}.".format(
            paths)
        raise OpenShiftCheckException(msg)
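Example #20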
    def running_fluentd_pods(self):
        """Return a list of running fluentd pods."""
        fluentd_pods = self.get_pods_for_component("fluentd")

        running_fluentd_pods = [pod for pod in fluentd_pods if pod['status']['phase'] == 'Running']
        if not running_fluentd_pods:
            raise OpenShiftCheckException(
                'No Fluentd pods were found to be in the "Running" state. '
                'At least one Fluentd pod is required in order to perform this check.'
            )

        return running_fluentd_pods
Example #21
    def check_elasticsearch_masters(self, pods_by_name):
        """Check that Elasticsearch masters are sane. Returns: list of errors"""
        es_master_names = set()
        errors = []
        for pod_name in pods_by_name.keys():
            # Compare what each ES node reports as master and compare for split brain
            get_master_cmd = self._build_es_curl_cmd(
                pod_name, "https://localhost:9200/_cat/master")
            master_name_str = self.exec_oc(
                get_master_cmd, [], save_as_name="get_master_names.json")
            master_names = (master_name_str or '').split(' ')
            if len(master_names) > 1:
                es_master_names.add(master_names[1])
            else:
                errors.append(
                    OpenShiftCheckException(
                        'NoMasterName',
                        'Elasticsearch {pod} gave unexpected response when asked master name:\n'
                        '  {response}'.format(pod=pod_name,
                                              response=master_name_str)))

        if not es_master_names:
            errors.append(
                OpenShiftCheckException(
                    'NoMasterFound',
                    'No logging Elasticsearch masters were found.'))
            return errors

        if len(es_master_names) > 1:
            errors.append(
                OpenShiftCheckException(
                    'SplitBrainMasters',
                    'Found multiple Elasticsearch masters according to the pods:\n'
                    '{master_list}\n'
                    'This implies that the masters have "split brain" and are not correctly\n'
                    'replicating data for the logging cluster. Log loss is likely to occur.'
                    .format(master_list='\n'.join(
                        '  ' + master for master in es_master_names))))

        return errors
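The `_cat/master` response is a single line of whitespace-separated fields, and the check above keeps the second field as the master identity. A tiny illustration with a made-up response line:

# Hypothetical `_cat/master` output line; the check uses the second field.
master_name_str = "AbC123xyz 10.128.0.95 10.128.0.95 logging-es-data-master-0"

master_names = (master_name_str or '').split(' ')
print(master_names[1])  # -> 10.128.0.95

Example #22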
def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
    exception_msg = 'fake check has an exception'
    run_exception = OpenShiftCheckException(exception_msg)
    check_class = fake_check(run_exception=run_exception)
    monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
    monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])

    result = plugin.run(tmp=None, task_vars=task_vars)

    assert failed(result['checks']['fake_check'], msg_has=exception_msg)
    assert failed(result, msg_has=['failed'])
    assert not changed(result)
    assert not skipped(result)
Example #23
    def get_required_ovs_version(self, task_vars):
        """Return the correct Open vSwitch version for the current OpenShift version"""
        openshift_version = self._get_openshift_version(task_vars)

        if float(openshift_version) < 3.5:
            return self.openshift_to_ovs_version["3.4"]

        ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
        if ovs_version:
            return self.openshift_to_ovs_version[str(openshift_version)]

        msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
        raise OpenShiftCheckException(msg.format(openshift_version))
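Example #24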
    def free_bytes(self, path):
        """Return the size available in path based on ansible_mounts."""
        mount = self.find_ansible_mount(path)
        try:
            return mount['size_available']
        except KeyError:
            raise OpenShiftCheckException(
                'Unable to retrieve disk availability for "{path}".\n'
                'Ansible facts included a matching mount point for this path:\n'
                '  {mount}\n'
                'however it is missing the size_available field.\n'
                'To investigate, you can inspect the output of `ansible -m setup <host>`'
                ''.format(path=path, mount=mount))
Example #25
    def load_known_checks(self):
        load_checks()

        known_checks = {}
        for cls in OpenShiftCheck.subclasses():
            check_name = cls.name
            if check_name in known_checks:
                other_cls = known_checks[check_name].__class__
                raise OpenShiftCheckException(
                    "non-unique check name '{}' in: '{}.{}' and '{}.{}'".
                    format(check_name, cls.__module__, cls.__name__,
                           other_cls.__module__, other_cls.__name__))
            known_checks[check_name] = cls(execute_module=self._execute_module)
        return known_checks
Example #26
    def running_fluentd_pods(self):
        """Return a list of running fluentd pods."""
        fluentd_pods, error = self.get_pods_for_component(
            self.logging_namespace,
            "fluentd",
        )
        if error:
            msg = 'Unable to retrieve any pods for the "fluentd" logging component: {}'.format(
                error)
            raise OpenShiftCheckException(msg)

        running_fluentd_pods = [
            pod for pod in fluentd_pods if pod['status']['phase'] == 'Running'
        ]
        if not running_fluentd_pods:
            msg = (
                'No Fluentd pods were found to be in the "Running" state. '
                'At least one Fluentd pod is required in order to perform this check.'
            )

            raise OpenShiftCheckException(msg)

        return running_fluentd_pods
Example #27
    def query_es_from_es(self, es_pod, uuid):
        """curl the Elasticsearch pod and look for a unique uuid in its logs."""
        pod_name = es_pod["metadata"]["name"]
        exec_cmd = (
            "exec {pod_name} -- curl --max-time 30 -s -f "
            "--cacert /etc/elasticsearch/secret/admin-ca "
            "--cert /etc/elasticsearch/secret/admin-cert "
            "--key /etc/elasticsearch/secret/admin-key "
            "https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
        )
        exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace, uuid=uuid)
        result = self.exec_oc(self.logging_namespace, exec_cmd, [])

        try:
            count = json.loads(result)["count"]
        except KeyError:
            msg = 'invalid response from Elasticsearch query:\n"{}"\nMissing "count" key:\n{}'
            raise OpenShiftCheckException(msg.format(exec_cmd, result))
        except ValueError:
            msg = 'invalid response from Elasticsearch query:\n"{}"\nNon-JSON output:\n{}'
            raise OpenShiftCheckException(msg.format(exec_cmd, result))

        return count
Example #28
    def running_elasticsearch_pods(self, es_pods):
        """Returns: list of running pods, list of errors about non-running pods"""
        not_running = self.not_running_pods(es_pods)
        running_pods = [pod for pod in es_pods if pod not in not_running]
        if not_running:
            return running_pods, [OpenShiftCheckException(
                'PodNotRunning',
                'The following Elasticsearch pods are defined but not running:\n'
                '{pods}'.format(pods=''.join(
                    "  {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
                    for pod in not_running
                ))
            )]
        return running_pods, []
Example #29
    def get_required_ovs_version(self):
        """Return the correct Open vSwitch version for the current OpenShift version"""
        openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))

        if openshift_version_tuple < (3, 5):
            return self.openshift_to_ovs_version["3.4"]

        openshift_version = ".".join(str(x) for x in openshift_version_tuple)
        ovs_version = self.openshift_to_ovs_version.get(openshift_version)
        if ovs_version:
            return self.openshift_to_ovs_version[openshift_version]

        msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
        raise OpenShiftCheckException(msg.format(openshift_version))
Example #30
def test_register(task_vars):
    check = dummy_check(task_vars)

    check.register_failure(OpenShiftCheckException("spam"))
    assert "spam" in str(check.failures[0])

    with pytest.raises(OpenShiftCheckException) as excinfo:
        check.register_file("spam")  # no file contents specified
    assert "not specified" in str(excinfo.value)

    # normally execute_module registers the result file; test disabling that
    check._execute_module = lambda *args, **_: dict()
    check.execute_module("eggs", module_args={}, register=False)
    assert not check.files_to_save