# Example 1
def test_verify_noobaa_status():
    """
    Verify noobaa status output is clean without any errors
    """
    # Run 'noobaa status', merging stderr into stdout (2>&1) so a single
    # stream can be scanned for errors.
    status_proc = run_async(
        f'noobaa status -n {defaults.ROOK_CLUSTER_NAMESPACE} 2>&1')
    ret, out, _ = status_proc.async_communicate()
    assert not ret, (
        f"noobaa status command failed.\nreturn code: {ret}\nstdout:\n{out}")

    # Each expected component must appear in the output the expected
    # number of times.
    expected_counts = defaults.NOOBAA_STATUS_CONTENT_COUNT
    for content, count in expected_counts.items():
        occurrences = out.count(f'Exists: {content} ')
        assert occurrences == count, (
            f"Could not find expected match for {content} in noobaa status "
            f"output. noobaa status:\n{out}")
    assert 'System Phase is \\"Ready\\"' in out, (
        f"System Phase is not 'Ready'. noobaa status:\n{out}")
    assert 'Exists:  \\"noobaa-admin\\"' in out, (
        f"'noobaa-admin' does not exists. noobaa status:\n{out}")

    # 'Not Found' lines are tolerated only for optional components.
    for line in out.split('\n'):
        if 'Not Found' not in line:
            continue
        assert 'Optional' in line, f"Error in noobaa status output- {line}"
    log.info("Verified: noobaa status does not contain any error.")
# Example 2
    def select_daemon(self, node_name=None):
        """
        Select pid of self.resource daemon

        Args:
            node_name (str): Name of node in which the resource daemon has
                to be selected.
        """
        if not node_name:
            node_name = self.resource_obj[0].pod_data.get("spec").get(
                "nodeName")

        # awk picks the first column (the pid) from the ps listing.
        awk_print = "'{print $1}'"
        pid_cmd = (
            f"oc {self.kubeconfig_parameter()}debug node/{node_name} -- chroot /host ps ax | grep"
            f" ' ceph-{self.resource} --' | grep -v grep | awk {awk_print}")
        ret, pid, err = run_async(pid_cmd).async_communicate()

        # More than one self.resource pod may be running on the node
        # (e.g. several OSDs on the same node), so the command can yield
        # several pids. Keep them all and select the first one.
        self.pids = pid.strip().split()
        assert self.pids, "Obtained pid value is empty."
        pid = self.pids[0]

        # ret will be 0 and err will be None if command is success
        assert not any([ret, err, not pid.isdigit()]), (
            f"Failed to fetch pid of ceph-{self.resource} "
            f"from {node_name}. ret:{ret}, pid:{pid}, err:{err}")

        self.daemon_pid = pid
    def test_multiple_pvc_concurrent_creation_deletion(self):
        """
        To exercise resource creation and deletion

        Deletes the initial batch of PVCs asynchronously while creating a
        new batch, then verifies: the new PVCs become Bound, the delete
        command succeeded, the initial PVCs are gone, and their backing
        PVs were reclaimed (reclaimPolicy is Delete).
        """
        # Start deleting 100 PVCs in the background; verification of the
        # command's result happens after the new PVCs are created.
        command = (f'for i in `seq 1 {self.number_of_pvc}`;do oc delete pvc '
                   f'{self.pvc_base_name}$i -n {self.namespace};done')
        proc = run_async(command)
        assert proc, (
            f'Failed to execute command for deleting {self.number_of_pvc} PVCs'
        )

        # Create 100 new PVCs
        # Parameters for PVC yaml as dict
        pvc_data = load_yaml_to_dict(constants.CSI_PVC_YAML)
        pvc_data['metadata']['namespace'] = self.namespace
        pvc_data['spec']['storageClassName'] = self.sc_obj.name
        pvc_data['metadata']['name'] = self.pvc_base_name_new

        # Create 100 PVCs
        pvc_objs = create_multiple_pvc(self.number_of_pvc, pvc_data)

        log.info(f'Created {self.number_of_pvc} new PVCs.')
        self.pvc_objs_new = pvc_objs[:]

        # Verify PVCs are Bound
        for pvc in self.pvc_objs_new:
            pvc.reload()
            assert pvc.status == constants.STATUS_BOUND, (
                f'PVC {pvc.name} is not Bound')
        log.info('Verified: Newly created PVCs are in Bound state.')

        # Verify command to delete PVCs
        ret, out, err = proc.async_communicate()
        log.info(
            f'Return values of command: {command}.\nretcode:{ret}\nstdout:'
            f'{out}\nstderr:{err}')
        assert not ret, 'Deletion of PVCs failed'

        # Verify PVCs are deleted. NOTE: the previous version did
        # 'return False' when a PVC was still fetchable; pytest ignores a
        # test's return value, so that failure was silently swallowed and
        # the PV verification below was skipped. Fail explicitly instead.
        for pvc in self.pvc_objs_initial:
            try:
                pvc.get()
            except exceptions.CommandFailed as exp:
                # 'not found' is the expected outcome for a deleted PVC;
                # any other failure is a real error.
                assert "not found" in str(exp), (
                    f'Failed to fetch details of PVC {pvc.name}')
                log.info(f'Expected: PVC {pvc.name} does not exists ')
            else:
                raise AssertionError(
                    f'PVC {pvc.name} still exists after deletion')
        log.info(f'Successfully deleted initial {self.number_of_pvc} PVCs')

        # Verify PVs using ceph toolbox. PVs should be deleted because
        # reclaimPolicy is Delete
        ceph_cmd = f'rbd ls -p {self.cbp_obj.name}'
        ct_pod = get_ceph_tools_pod()
        final_pv_list = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd, format='json')
        assert not any(pv in final_pv_list for pv in self.initial_pvs), (
            'PVs associated with deleted PVCs still exists')
        log.info('Verified: PVs associated with deleted PVCs are also deleted')
# Example 4
    def select_daemon(self, node_name=None):
        """
        Select pid of self.resource daemon

        Args:
            node_name (str): Name of node in which the resource daemon has
                to be selected. Defaults to the node of the first resource
                pod in self.resource_obj.
        """
        node_name = node_name or self.resource_obj[0].pod_data.get('spec').get(
            'nodeName')
        # awk picks the first column (the pid) from the ps listing.
        awk_print = "'{print $1}'"
        pid_cmd = (
            f"oc debug node/{node_name} -- chroot /host ps ax | grep"
            f" ' ceph-{self.resource} --' | grep -v grep | awk {awk_print}")
        pid_proc = run_async(pid_cmd)
        ret, pid, err = pid_proc.async_communicate()
        pid = pid.strip()

        # More than one self.resource daemon may run on the node (e.g.
        # several OSDs on the same node), in which case the command yields
        # multiple pids. The previous version kept the raw multi-line
        # output, which made pid.isdigit() fail below. Keep all pids and
        # select the first one instead.
        pids = pid.split()
        self.pids = [p.strip() for p in pids]
        assert self.pids, "Obtained pid value is empty."
        pid = self.pids[0]

        # ret will be 0 and err will be None if command is success
        assert not any([ret, err, not pid.isdigit()]), (
            f"Failed to fetch pid of ceph-{self.resource} "
            f"from {node_name}. ret:{ret}, pid:{pid}, err:{err}")

        self.daemon_pid = pid