Example 1
    def delete(self, yaml_file=None, resource_name='', wait=True, force=False):
        """
        Deletes a resource

        Args:
            yaml_file (str): Path to a yaml file to use in 'oc delete -f
                file.yaml'
            resource_name (str): Name of the resource you want to delete
            wait (bool): Determines if the delete command should wait for
                completion
            force (bool): True for force deletion with --grace-period=0,
                False otherwise

        Returns:
            dict: Dictionary represents a returned yaml file

        Raises:
            CommandFailed: If neither yaml_file nor resource_name is provided
        """
        if not (yaml_file or resource_name):
            raise CommandFailed(
                "At least one of resource_name or yaml_file has to "
                "be provided")

        command = f"delete "
        if resource_name:
            command += f"{self.kind} {resource_name}"
        else:
            command += f"-f {yaml_file}"
        if force:
            command += " --grace-period=0 --force"
        # oc default for wait is True
        if not wait:
            command += " --wait=false"
        return self.exec_oc_cmd(command)
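
A minimal usage sketch for the method above (the OCP class, its constructor arguments and exec_oc_cmd are taken from these examples; the resource names and yaml path are purely illustrative):

# Hypothetical usage sketch; resource kind, pod name and yaml path are illustrative.
pod_ocp = OCP(kind="pod", namespace="my-namespace")
pod_ocp.delete(resource_name="my-pod")                   # oc delete pod my-pod
pod_ocp.delete(yaml_file="/tmp/pod.yaml", wait=False)    # oc delete -f /tmp/pod.yaml --wait=false
pod_ocp.delete(resource_name="stuck-pod", force=True)    # adds --grace-period=0 --force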
Example 2
def run_cmd(cmd, **kwargs):
    """
    Run an arbitrary command locally

    Args:
        cmd (str): command to run

    Raises:
        CommandFailed: In case the command execution fails

    Returns:
        (str) Decoded stdout of command

    """
    log.info(f"Executing command: {cmd}")
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    r = subprocess.run(cmd,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       stdin=subprocess.PIPE,
                       **kwargs)
    log.debug(f"Command output: {r.stdout.decode()}")
    if r.stderr and not r.returncode:
        log.warning(f"Command warning:: {r.stderr.decode()}")
    if r.returncode:
        raise CommandFailed(f"Error during execution of command: {cmd}."
                            f"\nError is {r.stderr.decode()}")
    return r.stdout.decode()
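
A short usage sketch (the command strings are only illustrative; run_cmd returns the decoded stdout or raises CommandFailed on a non-zero return code, as documented above):

# Hypothetical usage; any locally available command works the same way.
version_output = run_cmd("oc version --client")
log.info(f"Client version: {version_output.strip()}")

try:
    run_cmd("oc get nonexistent-resource")
except CommandFailed as err:
    log.warning(f"Command failed as expected: {err}")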
Example 3
    def create(self, yaml_file=None, resource_name='', out_yaml_format=True):
        """
        Creates a new resource

        Args:
            yaml_file (str): Path to a yaml file to use in 'oc create -f
                file.yaml'
            resource_name (str): Name of the resource you want to create
            out_yaml_format (bool): Determines if the output should be
                formatted to a yaml like string

        Returns:
            dict: Dictionary represents a returned yaml file

        Raises:
            CommandFailed: If neither yaml_file nor resource_name is provided
        """
        if not (yaml_file or resource_name):
            raise CommandFailed(
                "At least one of resource_name or yaml_file has to "
                "be provided")
        command = "create "
        if yaml_file:
            command += f"-f {yaml_file}"
        elif resource_name:
            # e.g. "oc create namespace my-project"
            command += f"{self.kind} {resource_name}"
        if out_yaml_format:
            command += " -o yaml"
        output = self.exec_oc_cmd(command)
        log.debug(f"{yaml.dump(output)}")
        return output
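
A usage sketch mirroring the delete example above (the OCP instance and yaml path are assumptions; with out_yaml_format=True the parsed yaml comes back as a dict):

# Hypothetical usage; the yaml file path and project name are illustrative.
ns_ocp = OCP(kind="namespace")
created = ns_ocp.create(resource_name="my-project")    # oc create namespace my-project -o yaml
pvc_dict = ns_ocp.create(yaml_file="/tmp/pvc.yaml")    # oc create -f /tmp/pvc.yaml -o yaml
log.info(f"Created resource name: {created['metadata']['name']}")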
Example 4
    def exec_ceph_cmd(self, ceph_cmd, format='json-pretty'):
        """
        Execute a Ceph command on the Ceph tools pod

        Args:
            ceph_cmd (str): The Ceph command to execute on the Ceph tools pod
            format (str): The returning output format of the Ceph command

        Returns:
            dict: Ceph command output

        Raises:
            CommandFailed: In case the pod is not a toolbox pod
        """
        if 'rook-ceph-tools' not in self.labels.values():
            raise CommandFailed(
                "Ceph commands can be executed only on toolbox pod")
        if format:
            ceph_cmd += f" --format {format}"
        out = self.exec_cmd_on_pod(ceph_cmd)

        # For some commands, like "ceph fs ls", the returned output is a list
        if isinstance(out, list):
            return [item for item in out if item]
        return out
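
A brief usage sketch (the toolbox pod object is an assumption, named after ceph_cluster.toolbox used in Example 15; the health status key follows the standard `ceph status --format json` layout):

# Hypothetical usage; toolbox_pod stands for the rook-ceph-tools pod object.
status = toolbox_pod.exec_ceph_cmd("ceph status")
log.info(f"Cluster health: {status['health']['status']}")

# Commands returning a list, e.g. "ceph fs ls", come back with empty items filtered out.
filesystems = toolbox_pod.exec_ceph_cmd("ceph fs ls")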
Example 5
    def setup(self):
        """
        Setting up the test environment:
            Calculating the amount of storage available for the test
            Creating namespace (project) for the test

        """
        log.info("Setting up the test environment")

        super(TestPvcMultiSnapshotPerformance, self).setup()

        # Getting the total Storage capacity
        try:
            self.ceph_capacity = int(self.ceph_cluster.get_ceph_capacity())
        except Exception as err:
            err_msg = f"Failed to get Storage capacity : {err}"
            log.error(err_msg)
            raise Exception(err_msg)

        # Use 70% of the storage capacity in the test
        self.capacity_to_use = int(self.ceph_capacity * 0.7)

        # Creating new namespace for the test
        self.nss_name = "pas-test-namespace"
        log.info(f"Creating new namespace ({self.nss_name}) for the test")
        try:
            self.proj = helpers.create_project(project_name=self.nss_name)
        except CommandFailed as ex:
            if "(AlreadyExists)" in str(ex):
                log.warning("The namespace already exists!")
            log.error("Can not create new project")
            raise CommandFailed(f"{self.nss_name} was not created")

        # Initialize a general Snapshot object to use in the test
        self.snapshot = OCP(kind="volumesnapshot", namespace=self.nss_name)
Example 6
def generate_onboarding_token():
    """
    Generate Onboarding token for consumer cluster via following steps:

    1. Download ticketgen.sh script from:
        https://raw.githubusercontent.com/jarrpa/ocs-operator/ticketgen/hack/ticketgen/ticketgen.sh
    2. Save private key from AUTH["managed_service"]["private_key"] to
        temporary file.
    3. Run ticketgen.sh script to generate Onboarding token.

    Raises:
        CommandFailed: In case the script ticketgen.sh fails.
        ConfigurationError: when AUTH["managed_service"]["private_key"] is not defined

    Returns:
        string: Onboarding token

    """
    logger.debug("Generate onboarding token for ODF to ODF deployment")
    ticketgen_script_path = os.path.join(constants.DATA_DIR, "ticketgen.sh")
    # download ticketgen.sh script
    logger.debug("Download and prepare ticketgen.sh script")
    download_file(
        "https://raw.githubusercontent.com/jarrpa/ocs-operator/ticketgen/hack/ticketgen/ticketgen.sh",
        ticketgen_script_path,
    )
    # add execute permission to the ticketgen.sh script
    current_file_permissions = os.stat(ticketgen_script_path)
    os.chmod(
        ticketgen_script_path,
        current_file_permissions.st_mode | stat.S_IEXEC,
    )
    # save private key to temp file
    logger.debug("Prepare temporary file with private key")
    private_key = config.AUTH.get("managed_service", {}).get("private_key", "")
    if not private_key:
        raise ConfigurationError(
            "Private key for Managed Service not defined.\n"
            "Expected following configuration in auth.yaml file:\n"
            "managed_service:\n"
            '  private_key: "..."\n'
            '  public_key: "..."'
        )
    with NamedTemporaryFile(
        mode="w", prefix="private", suffix=".pem", delete=True
    ) as key_file:
        key_file.write(private_key)
        key_file.flush()
        logger.debug("Generate Onboarding token")
        ticketgen_result = exec_cmd(f"{ticketgen_script_path} {key_file.name}")
    ticketgen_output = ticketgen_result.stdout.decode()
    if ticketgen_result.stderr:
        raise CommandFailed(
            f"Script ticketgen.sh failed to generate Onboarding token:\n"
            f"command: '{' '.join(ticketgen_result.args)}'\n"
            f"stderr: {ticketgen_result.stderr.decode()}\n"
            f"stdout: {ticketgen_output}"
        )
    return ticketgen_output
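
A minimal usage sketch (assumes the managed_service private key is configured in auth.yaml as described in the error message above):

# Hypothetical usage; the token is only logged here for illustration.
token = generate_onboarding_token()
logger.debug(f"Onboarding token generated, length: {len(token)}")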
Example 7
 def create_test_project(self):
     """
     Creating new project (namespace) for performance test
     """
     self.namespace = helpers.create_unique_resource_name("pas-test", "namespace")
     log.info(f"Creating new namespace ({self.namespace}) for the test")
     try:
         self.proj = helpers.create_project(project_name=self.namespace)
     except CommandFailed as ex:
         if "(AlreadyExists)" in str(ex):
             log.warning("The namespace already exists!")
         log.error("Cannot create new project")
         raise CommandFailed(f"{self.namespace} was not created")
Example 8
 def delete_test_project(self):
     """
     Deleting the performance test project (namespace)
     """
     log.info(f"Deleting the test namespace : {self.namespace}")
     switch_to_default_rook_cluster_project()
     try:
         self.proj.delete(resource_name=self.namespace)
         self.proj.wait_for_delete(
             resource_name=self.namespace, timeout=60, sleep=10
         )
     except CommandFailed:
         log.error(f"Cannot delete project {self.namespace}")
         raise CommandFailed(f"{self.namespace} was not deleted")
Example 9
    def delete_project(self, project_name):
        """
        Delete a project. A project created by the new_project function does
        not have a corresponding yaml file, so normal resource deletion calls
        do not work

        Args:
            project_name (str): Name of the project to be deleted

        Returns:
            bool: True in case project deletion succeeded.

        Raises:
            CommandFailed: When the project deletion does not succeed.

        """
        command = f"oc delete project {project_name}"
        if f' "{project_name}" deleted' in run_cmd(f"{command}"):
            return True
        raise CommandFailed(f"{project_name} was not deleted")
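
A short usage sketch (ocp_obj stands for an instance of the class above; the method returns True on success and raises CommandFailed otherwise):

# Hypothetical usage; the project name is illustrative.
try:
    ocp_obj.delete_project("pas-test-namespace")
except CommandFailed as err:
    log.error(f"Project cleanup failed: {err}")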
Example 10
def run_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs):
    """
    Run an arbitrary command locally

    Args:
        cmd (str): command to run
        secrets (list): A list of secrets to be masked with asterisks
            This kwarg is popped in order to not interfere with
            subprocess.run(``**kwargs``)
        timeout (int): Timeout for the command, defaults to 600 seconds.
        ignore_error (bool): True if ignore non zero return code and do not
            raise the exception.

    Raises:
        CommandFailed: In case the command execution fails

    Returns:
        (str) Decoded stdout of command

    """
    masked_cmd = mask_secrets(cmd, secrets)
    log.info(f"Executing command: {masked_cmd}")
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    r = subprocess.run(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        timeout=timeout,
        **kwargs
    )
    log.debug(f"Command output: {r.stdout.decode()}")
    if r.stderr and not r.returncode:
        log.warning(f"Command warning: {mask_secrets(r.stderr.decode(), secrets)}")
    if r.returncode and not ignore_error:
        raise CommandFailed(
            f"Error during execution of command: {masked_cmd}."
            f"\nError is {mask_secrets(r.stderr.decode(), secrets)}"
        )
    return mask_secrets(r.stdout.decode(), secrets)
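
A sketch of the secrets masking and timeout handling (the password value is purely illustrative; the docstring above states that listed secrets are replaced with asterisks in logs and output):

# Hypothetical usage; "s3cr3t" is an illustrative value only.
out = run_cmd(
    "oc login -u kubeadmin -p s3cr3t",
    secrets=["s3cr3t"],
    timeout=120,
)
# Non-zero return codes can be tolerated explicitly:
run_cmd("oc get missing-resource", ignore_error=True)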
Example 11
    def get_fio_results(self):
        """
        Get FIO execution results

        Returns:
            dict: Dictionary represents the FIO execution results

        Raises:
            CommandFailed: If FIO returned an empty result
            Exception: In case of exception from FIO
        """
        try:
            result = self.fio_thread.result(FIO_TIMEOUT)
            if result:
                return yaml.safe_load(result)
            raise CommandFailed(f"FIO execution results: {result}.")

        except CommandFailed as ex:
            logger.exception(f"FIO failed: {ex}")
            raise
        except Exception as ex:
            logger.exception(f"Found Exception: {ex}")
            raise
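
A usage sketch (pod_obj stands for the object exposing get_fio_results; the 'jobs' and 'iops' keys follow fio's standard JSON output and are an assumption about the fio version in use):

# Hypothetical usage; fio JSON layout may differ between fio versions.
fio_result = pod_obj.get_fio_results()
write_iops = fio_result.get("jobs", [{}])[0].get("write", {}).get("iops")
logger.info(f"Measured write IOPS: {write_iops}")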
Example 12
def run_cmd(cmd, secrets=None, **kwargs):
    """
    Run an arbitrary command locally

    Args:
        cmd (str): command to run

        secrets (list): A list of secrets to be masked with asterisks
            This kwarg is popped in order to not interfere with
            subprocess.run(**kwargs)

    Raises:
        CommandFailed: In case the command execution fails

    Returns:
        (str) Decoded stdout of command

    """
    masked_cmd = mask_secrets(cmd, secrets)
    log.info(f"Executing command: {masked_cmd}")
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    r = subprocess.run(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        **kwargs
    )
    log.debug(f"Command output: {r.stdout.decode()}")
    if r.stderr and not r.returncode:
        log.warning(f"Command warning: {mask_secrets(r.stderr.decode(), secrets)}")
    if r.returncode:
        raise CommandFailed(
            f"Error during execution of command: {masked_cmd}."
            f"\nError is {mask_secrets(r.stderr.decode(), secrets)}"
        )
    return mask_secrets(r.stdout.decode(), secrets)
Example 13
 def win_exec(self, ps_command, timeout=180):
     """
     Execute a PowerShell command on the Windows host over SSH

     Args:
         ps_command (str): PowerShell command to execute
         timeout (int): Seconds to wait for the command to finish

     Returns:
         dict: exit_code, stdout and stderr of the executed command

     Raises:
         TimeoutException: If the command does not finish within the timeout
         CommandFailed: If the command produces stderr or a non-zero exit code
     """
     log.info("Running PowerShell command `{}`".format(ps_command))
     client = paramiko.SSHClient()
     client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     client.connect(self.ip_address,
                    username=self.login,
                    password=self.password)
     command = 'powershell -Command "& {{{}}}"'.format(ps_command)
     chan_ssh = client.get_transport().open_session()
     chan_ssh.exec_command(command)
     for i in range(0, timeout):
         sleep(1)
         if chan_ssh.exit_status_ready():
             break
     else:
         raise TimeoutException("Timeout")
     output = dict()
     output["exit_code"] = chan_ssh.recv_exit_status()
     output["stdout"] = chan_ssh.recv(-1)
     output["stderr"] = chan_ssh.recv_stderr(-1)
     if not bool(output["stderr"]) and output["exit_code"] == 0:
         return output
     else:
         raise CommandFailed(output["stderr"])
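
A usage sketch (win_host stands for an instance of the class above with ip_address, login and password set; the queried service is illustrative):

# Hypothetical usage; the PowerShell command is illustrative.
result = win_host.win_exec("Get-Service MSiSCSI | Select-Object Status")
log.info("Exit code: {}, output: {}".format(
    result["exit_code"], result["stdout"].decode()))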
Example 14
    def run(self, podname, namespace, cmd_obj):
        resp = None
        stdout = None
        stderr = None
        ret = None

        try:
            resp = self.api.read_namespaced_pod(name=podname, namespace=namespace)
            logger.info(resp)
        except ApiException as ex:
            if ex.status != 404:
                logger.error("Unknown error: %s" % ex)

        # run command in bash
        bash = ["/bin/bash"]
        resp = stream(
            self.api.connect_get_namespaced_pod_exec,
            podname,
            namespace,
            command=bash,
            stderr=True,
            stdin=True,
            stdout=True,
            tty=False,
            _preload_content=False,
        )
        done = False
        outbuf = ""
        while resp.is_open():
            resp.update(timeout=1)
            if resp.peek_stdout():
                stdout = resp.read_stdout(timeout=cmd_obj.timeout)
                outbuf = outbuf + stdout
                if cmd_obj.long_running:
                    while resp.peek_stdout(timeout=cmd_obj.timeout):
                        stdout = resp.read_stdout(timeout=cmd_obj.timeout)
                        outbuf = outbuf + stdout
            if resp.peek_stderr():
                stderr = resp.read_stderr(timeout=60)
            if not done:
                resp.write_stdin(cmd_obj.cmd)
                resp.write_stdin("\n")
                done = True
            else:
                break
        """
        Couple of glitches in capturing return value.
        Rest api doesn't return ret value of the command
        hence this workaround.
        we can fix this once we have facility to capture err code
        """
        if cmd_obj.check_ec:
            resp.write_stdin("echo $?\n")
            try:
                ret = int(resp.readline_stdout(timeout=5))
            except (ValueError, TypeError):
                logger.error(f"TimeOut: Command timedout after {cmd_obj.timeout}")
                raise CommandFailed(f'Failed to run "{cmd_obj.cmd}"')
            finally:
                resp.close()

        if outbuf:
            stdout = outbuf

        return stdout, stderr, ret
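
A usage sketch (cmd_obj only needs the attributes the method reads: cmd, timeout, long_running and check_ec; the SimpleNamespace stand-in, pod name and namespace are assumptions for illustration):

# Hypothetical usage; a SimpleNamespace stands in for the real command object.
from types import SimpleNamespace

cmd_obj = SimpleNamespace(cmd="ceph -s", timeout=60, long_running=False, check_ec=True)
stdout, stderr, ret = runner.run(
    podname="rook-ceph-tools-xyz",
    namespace="openshift-storage",
    cmd_obj=cmd_obj,
)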
Example 15
    def teardown(self):
        """
        Cleaning up the environment:
            Delete all snapshots
            Delete the POD
            Delete the PVC and the PV
            Delete the StorageClass
            Delete the VolumeSnapshotClass
            Delete the data pool
            Switch to the default namespace
            Delete the tested namespace

        """
        log.info("Cleanup the test environment")

        if self.full_teardown:
            # Getting the name of the PVC's backing PV
            pv = None
            try:
                pv = self.pvc_obj.get("spec")["spec"]["volumeName"]
            except KeyError:
                log.error(
                    f"Cannot find key in the PVC object {json.dumps(self.pvc_obj.get('spec').get('spec'), indent=3)}"
                )

            # Getting the list of all snapshots
            try:
                snapshot_list = self.snapshot.get(all_namespaces=True)["items"]
            except Exception as err:
                log.error(f"Cannot get the list of snapshots : {err}")
                snapshot_list = []

            # Deleting all snapshots from the cluster
            log.info(f"Trying to delete all ({len(snapshot_list)}) Snapshots")
            log.debug(
                f"The list of all snapshots is : {json.dumps(snapshot_list, indent=3)}"
            )
            for vs in snapshot_list:
                snap_name = vs["metadata"]["name"]
                log.info(f"Try to delete {snap_name}")
                try:
                    self.snapshot.delete(resource_name=snap_name)
                except Exception as err:
                    log.error(f"Cannot delete {snap_name} : {err}")

            # Deleting the pod which wrote data to the pvc
            log.info(f"Deleting the test POD : {self.pod_obj.name}")
            try:
                self.pod_obj.delete()
                log.info("Wait until the pod is deleted.")
                self.pod_obj.ocp.wait_for_delete(
                    resource_name=self.pod_obj.name)
            except Exception as ex:
                log.error(f"Cannot delete the test pod : {ex}")

            # Deleting the PVC which used in the test.
            try:
                log.info(f"Delete the PVC : {self.pvc_obj.name}")
                self.pvc_obj.delete()
                log.info("Wait until the pvc is deleted.")
                self.pvc_obj.ocp.wait_for_delete(
                    resource_name=self.pvc_obj.name)
            except Exception as ex:
                log.error(f"Cannot delete the test pvc : {ex}")

            # Delete the backend PV of the PVC (skip if its name could not be found)
            if pv:
                log.info(f"Try to delete the backend PV : {pv}")
                try:
                    run_oc_command(f"delete pv {pv}")
                except Exception as ex:
                    err_msg = f"cannot delete PV {pv} - [{ex}]"
                    log.error(err_msg)

            # Deleting the StorageClass used in the test
            log.info(f"Deleting the test StorageClass : {self.sc_obj.name}")
            try:
                self.sc_obj.delete()
                log.info("Wait until the SC is deleted.")
                self.sc_obj.ocp.wait_for_delete(resource_name=self.sc_obj.name)
            except Exception as ex:
                log.error(f"Can not delete the test sc : {ex}")

            # Deleting the VolumeSnapshotClass used in the test
            log.info(
                f"Deleting the test Snapshot Class : {self.snap_class.name}")
            try:
                self.snap_class.delete()
                log.info("Wait until the VSC is deleted.")
                self.snap_class.ocp.wait_for_delete(
                    resource_name=self.snap_class.name)
            except Exception as ex:
                log.error(f"Can not delete the test vsc : {ex}")

            # Deleting the Data pool
            log.info(f"Deleting the test storage pool : {self.sc_name}")
            self.delete_ceph_pool(self.sc_name)
            # Verify deletion by checking the backend CEPH pools using the toolbox
            results = self.ceph_cluster.toolbox.exec_cmd_on_pod(
                "ceph osd pool ls")
            log.debug(f"Existing pools are : {results}")
            if self.sc_name in results.split():
                log.warning(
                    "The pool was not deleted by CSI, forcing manual deletion"
                )
                self.ceph_cluster.toolbox.exec_cmd_on_pod(
                    f"ceph osd pool delete {self.sc_name} {self.sc_name} "
                    "--yes-i-really-really-mean-it")
            else:
                log.info(f"The pool {self.sc_name} was deleted successfully")

            # Deleting the namespace used by the test
            log.info(f"Deleting the test namespace : {self.nss_name}")
            switch_to_default_rook_cluster_project()
            try:
                self.proj.delete(resource_name=self.nss_name)
                self.proj.wait_for_delete(resource_name=self.nss_name,
                                          timeout=60,
                                          sleep=10)
            except CommandFailed:
                log.error(f"Cannot delete project {self.nss_name}")
                raise CommandFailed(f"{self.nss_name} was not deleted")

            # After deleting all data from the cluster, we need to wait until it re-balances
            ceph_health_check(namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
                              tries=30,
                              delay=60)

        super(TestPvcMultiSnapshotPerformance, self).teardown()
Example 16
    def exec_command(self, **kw):
        """
        Execute a command on the VM

        e.g. self.exec_command(cmd='uptime')
        or
        self.exec_command(cmd='background_cmd', check_ec=False)

        Args:
            check_ec (bool): False will run the command and not wait for exit
                code

        """

        if kw.get("sudo"):
            ssh = self.rssh
        else:
            ssh = self.ssh

        if kw.get("timeout"):
            timeout = kw["timeout"]
        else:
            timeout = 120
        logger.info("Running command %s on %s", kw["cmd"], self.ip_address)
        stdin = None
        stdout = None
        stderr = None
        if self.run_once:
            self.ssh_transport().set_keepalive(15)
            self.rssh_transport().set_keepalive(15)
        if kw.get("long_running"):
            logger.info("long running command --")
            channel = ssh().get_transport().open_session()
            channel.exec_command(kw["cmd"])
            read = ""
            while True:
                if channel.exit_status_ready():
                    ec = channel.recv_exit_status()
                    break
                rl, wl, xl = select([channel], [], [channel], 4200)
                if len(rl) > 0:
                    data = channel.recv(1024)
                    read += data.decode()
                    logger.info(data.decode())
                if len(xl) > 0:
                    data = channel.recv(1024)
                    read += data.decode()
                    logger.info(data.decode())
            return read, ec
        try:
            stdin, stdout, stderr = ssh().exec_command(kw["cmd"],
                                                       timeout=timeout)
        except SSHException as e:
            logger.error("Exception during cmd %s", str(e))
            if "Timeout openning channel" in str(e):
                logger.error("channel reset error")
        exit_status = stdout.channel.recv_exit_status()
        self.exit_status = exit_status
        if kw.get("check_ec", True):
            if exit_status == 0:
                logger.info("Command completed successfully")
            else:
                logger.error("Error during cmd %s, timeout %d", exit_status,
                             timeout)
                raise CommandFailed(kw["cmd"] + " Error:  " +
                                    str(stderr.read().decode()) + " " +
                                    str(self.ip_address))
            return stdout, stderr
        else:
            return stdout, stderr
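
A usage sketch (host stands for an instance of the class above; the cmd, timeout, sudo and long_running keywords mirror what the method reads from **kw):

# Hypothetical usage; "host" is an instance of the class above.
stdout, stderr = host.exec_command(cmd="uptime", timeout=30)
logger.info(stdout.read().decode())

# Long-running commands return the accumulated output and exit code instead.
output, ec = host.exec_command(cmd="yum -y update", long_running=True, sudo=True)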