Example #1
def test_run_cmd_simple_positive_with_secrets(caplog):
    """
    Check simple positive use case for run_cmd, including logging,
    when secrets are specified.
    """
    caplog.set_level(logging.DEBUG)
    secrets = ["8bca8d2e-1cd6", "683c08d7-bc07"]
    cmd = "echo -n hello 8bca8d2e-1cd6"
    assert utils.run_cmd(cmd, secrets=secrets) == "hello *****"
    # check that logs were sanitized as well
    for secret in secrets:
        assert secret not in caplog.text
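The assertion above relies on run_cmd replacing every secret with asterisks in
both its return value and anything it logs. A minimal sketch of that masking
logic (an illustration only, not the library's actual implementation; the
mask_secrets helper name is made up):

def mask_secrets(text, secrets):
    # Replace each occurrence of every secret with asterisks so that
    # neither the returned output nor the logs leak sensitive values.
    for secret in secrets or []:
        text = text.replace(secret, "*****")
    return text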
Example #2
    def image_uuid(self):
        """
        Fetch image uuid associated with PVC

        Returns:
            str: Image uuid associated with PVC
        """
        spec_volhandle = "'{.spec.csi.volumeHandle}'"
        cmd = f"oc get pv/{self.backed_pv} -o jsonpath={spec_volhandle}"
        out = run_cmd(cmd=cmd)
        image_uuid = "-".join(out.split('-')[-5:])
        return image_uuid
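The "-".join(out.split('-')[-5:]) expression keeps the last five dash-separated
fields of the volumeHandle, which is exactly the 8-4-4-4-12 shape of a UUID. A
quick illustration with a made-up volumeHandle (the real value comes from the
PV's .spec.csi.volumeHandle field):

out = "0001-0011-openshift-storage-0000000000000001-f2d2e6b0-1a2b-11ea-8d71-362b9e155667"
print("-".join(out.split('-')[-5:]))
# f2d2e6b0-1a2b-11ea-8d71-362b9e155667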
Example #3
    def create_quay_registry(self):
        """
        Creates Quay registry

        """
        if not helpers.get_default_storage_class():
            patch = ' \'{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}\' '
            run_cmd(
                f"oc patch storageclass {self.sc_name} "
                f"-p {patch} "
                f"--request-timeout=120s"
            )
            self.sc_default = True
        quay_registry_data = templating.load_yaml(file=constants.QUAY_REGISTRY)
        self.quay_registry_name = quay_registry_data["metadata"]["name"]
        self.quay_registry = OCS(**quay_registry_data)
        logger.info(f"Creating Quay registry: {self.quay_registry.name}")
        self.quay_registry.create()
        logger.info("Waiting for 15s for registry to get initialized")
        sleep(15)
        self.wait_for_quay_endpoint()
Example #4
def fetch_all_device_paths():
    """
    Return all device paths inside worker nodes

    Returns:
        list: List containing all device paths

    """
    path = os.path.join(constants.EXTERNAL_DIR, "device-by-id-ocp")
    clone_repo(constants.OCP_QE_DEVICEPATH_REPO, path)
    os.chdir(path)
    logger.info("Running script to fetch device paths...")
    run_cmd("ansible-playbook devices_by_id.yml")
    with open("local-storage-block.yaml") as local_storage_block:
        local_block = yaml.load(local_storage_block, Loader=yaml.FullLoader)
        dev_paths = local_block["spec"]["storageClassDevices"][0][
            "devicePaths"]
    logger.info(f"All devices are {dev_paths}")
    os.chdir(constants.TOP_DIR)
    shutil.rmtree(path)
    return dev_paths
Example #5
        def deploy(self, log_cli_level='DEBUG'):
            """
            Deployment specific to OCP cluster on this platform

            Args:
                log_cli_level (str): openshift installer's log level
                    (default: "DEBUG")

            """
            logger.info("Deploying OCP cluster for vSphere platform")
            logger.info(
                f"Openshift-installer will be using loglevel:{log_cli_level}")
            os.chdir(self.terraform_data_dir)
            self.terraform.initialize()
            self.terraform.apply(self.terraform_var)
            os.chdir(self.previous_dir)
            logger.info("waiting for bootstrap to complete")
            run_cmd(f"{self.installer} wait-for bootstrap-complete "
                    f"--dir {self.cluster_path} "
                    f"--log-level {log_cli_level}")
            logger.info("removing bootstrap node")
            os.chdir(self.terraform_data_dir)
            self.terraform.apply(self.terraform_var, bootstrap_complete=True)
            os.chdir(self.previous_dir)

            OCP.set_kubeconfig(self.kubeconfig)
            # wait for image registry to show-up
            co = "image-registry"
            wait_for_co(co)

            # patch image registry to null
            self.configure_storage_for_image_registry(self.kubeconfig)

            # wait for install to complete
            logger.info("waiting for install to complete")
            run_cmd(f"{self.installer} wait-for install-complete "
                    f"--dir {self.cluster_path} "
                    f"--log-level {log_cli_level}")

            self.test_cluster()
Example #6
def add_million_files(pod_name, ocp_obj):
    """
    Create a directory with one million files in it.
    Tar that directory into a gzipped tar file.
    rsync that tar file to the cephfs pod.
    Extract the tar file on the ceph pod onto the mounted ceph filesystem.

    Args:
        pod_name (str): Name of the cephfs pod to receive the files
        ocp_obj (OCP): OCP object used to run the oc commands

    Returns:
        list: list of ten of the files created.
    """
    logging.info(f"Creating {TFILES} files on Cephfs")
    onetenth = TFILES // 10
    endoften = onetenth - 1
    ntar_loc = mkdtemp()
    tarfile = os.path.join(ntar_loc, TARFILE)
    new_dir = mkdtemp()
    test_file_list = []
    for i in range(0, TFILES):
        tmpfile = NamedTemporaryFile(dir=new_dir, delete=False)
        fname = tmpfile.name
        with tmpfile:
            tmpfile.write(SAMPLE_TEXT)
        if i % onetenth == endoften:
            dispv = i + 1
            logging.info(f"{dispv} local files created")
            test_file_list.append(fname.split(os.sep)[-1])
    tmploc = ntar_loc.split("/")[-1]
    run_cmd(f"tar cfz {tarfile} -C {new_dir} .")
    ocp_obj.exec_oc_cmd(f"rsync {ntar_loc} {pod_name}:{constants.MOUNT_POINT}",
                        timeout=300)
    ocp_obj.exec_oc_cmd(f"exec {pod_name} -- mkdir {constants.MOUNT_POINT}/x")
    ocp_obj.exec_oc_cmd(
        f"exec {pod_name} -- /bin/tar xf"
        f" {constants.MOUNT_POINT}/{tmploc}/{TARFILE}"
        f" -C {constants.MOUNT_POINT}/x",
        timeout=3600,
    )
    rmtree(new_dir)
    os.remove(tarfile)
    return test_file_list
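The i % onetenth == endoften check matches the last index of each tenth of the
loop, so a progress message is logged at every 10% mark. A quick check of that
arithmetic, assuming TFILES is one million as the function name suggests (an
assumption; the constant is defined elsewhere):

# With TFILES = 1_000_000, onetenth = 100_000 and endoften = 99_999,
# so the condition holds at i = 99_999, 199_999, ..., 999_999.
TFILES = 1_000_000
onetenth = TFILES // 10
endoften = onetenth - 1
marks = [i + 1 for i in range(TFILES) if i % onetenth == endoften]
print(marks)  # [100000, 200000, ..., 1000000]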
Example #7
 def create_catalog_source(self):
     """
     Prepare the catalog source manifest for deploying the OCS operator
     from the quay registry.
     """
     image = config.DEPLOYMENT.get('ocs_registry_image', '')
     upgrade = config.DEPLOYMENT.get('upgrade', False)
     image_and_tag = image.split(':')
     image = image_and_tag[0]
     image_tag = image_and_tag[1] if len(image_and_tag) == 2 else None
     if not image_tag and config.REPORTING.get("us_ds") == 'DS':
         image_tag = get_latest_ds_olm_tag(upgrade,
                                           latest_tag=config.DEPLOYMENT.get(
                                               'default_latest_tag',
                                               'latest'))
     catalog_source_data = templating.load_yaml(
         constants.CATALOG_SOURCE_YAML)
     cs_name = constants.OPERATOR_CATALOG_SOURCE_NAME
     # TODO: Once needed we can also set the channel for the subscription
     # from config.DEPLOYMENT.get('ocs_csv_channel')
     change_cs_condition = (
         (image or image_tag)
         and catalog_source_data['kind'] == 'CatalogSource'
         and catalog_source_data['metadata']['name'] == cs_name)
     if change_cs_condition:
         default_image = config.DEPLOYMENT['default_ocs_registry_image']
         image = image if image else default_image.split(':')[0]
         catalog_source_data['spec']['image'] = (
             f"{image}:{image_tag if image_tag else 'latest'}")
     catalog_source_manifest = tempfile.NamedTemporaryFile(
         mode='w+', prefix='catalog_source_manifest', delete=False)
     templating.dump_data_to_temp_yaml(catalog_source_data,
                                       catalog_source_manifest.name)
     run_cmd(f"oc create -f {catalog_source_manifest.name}", timeout=2400)
     catalog_source = CatalogSource(
         resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
         namespace='openshift-marketplace',
     )
     # Wait for the catalog source to be ready
     catalog_source.wait_for_state("READY")
Example #8
    def subscribe_ocs(self):
        """
        Prepare the subscription manifest and subscribe to the OCS operator.

        """
        # wait for package manifest
        package_manifest = PackageManifest(
            resource_name=defaults.OCS_OPERATOR_NAME)
        # Wait for the package manifest to be ready
        package_manifest.wait_for_resource(timeout=300)
        default_channel = package_manifest.get_default_channel()
        subscription_yaml_data = templating.load_yaml(
            constants.SUBSCRIPTION_YAML)
        subscription_plan_approval = config.DEPLOYMENT.get(
            'subscription_plan_approval')
        if subscription_plan_approval:
            subscription_yaml_data['spec']['installPlanApproval'] = (
                subscription_plan_approval)
        custom_channel = config.DEPLOYMENT.get('ocs_csv_channel')
        if custom_channel:
            logger.info(f"Custom channel will be used: {custom_channel}")
            subscription_yaml_data['spec']['channel'] = custom_channel
        else:
            logger.info(f"Default channel will be used: {default_channel}")
            subscription_yaml_data['spec']['channel'] = default_channel
        if config.DEPLOYMENT.get('stage'):
            subscription_yaml_data['spec']['source'] = (
                config.DEPLOYMENT['stage_namespace'])
        if config.DEPLOYMENT.get('live_deployment'):
            subscription_yaml_data['spec']['source'] = (config.DEPLOYMENT.get(
                'live_content_source', defaults.LIVE_CONTENT_SOURCE))
        subscription_manifest = tempfile.NamedTemporaryFile(
            mode='w+', prefix='subscription_manifest', delete=False)
        templating.dump_data_to_temp_yaml(subscription_yaml_data,
                                          subscription_manifest.name)
        run_cmd(f"oc create -f {subscription_manifest.name}")
        if subscription_plan_approval == 'Manual':
            wait_for_install_plan_and_approve(self.namespace)
Example #9
 def set_registry_to_managed_state(self):
     """
     In order to be able to deploy from stage we need to change the
     image registry config to the Managed state.
     Described in more detail in these BZs:
     https://bugzilla.redhat.com/show_bug.cgi?id=1806593
     https://bugzilla.redhat.com/show_bug.cgi?id=1807471#c3
     We need to change to the Managed state as described here:
     https://github.com/red-hat-storage/ocs-ci/issues/1436
     This is not supposed to be deleted as a workaround, since we really
     need this operation for OCS deployment, as originally done here:
     https://github.com/red-hat-storage/ocs-ci/pull/1437
     Currently it has to live here so that the CA certificate is properly
     propagated for the stage deployment, as mentioned in the BZ.
     """
     if config.ENV_DATA['platform'] not in constants.CLOUD_PLATFORMS:
         run_cmd(
             f'oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p '
             f'\'{{"spec":{{"storage": {{"emptyDir":{{}}}}}}}}\'')
         run_cmd(
             f'oc patch {constants.IMAGE_REGISTRY_CONFIG} --type merge -p '
             f'\'{{"spec":{{"managementState": "Managed"}}}}\'')
Example #10
    def flexy_post_processing(self):
        """
        Update global pull-secret and configure ntp (if required).
        """
        # Apply pull secrets on ocp cluster
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get("kubeconfig_location"))
        secret_cmd = (
            f"oc set data secret/pull-secret "
            f"--kubeconfig {kubeconfig} "
            f"-n {constants.OPENSHIFT_CONFIG_NAMESPACE} "
            f"--from-file=.dockerconfigjson={constants.DATA_DIR}/pull-secret")
        run_cmd(secret_cmd)

        if not config.ENV_DATA.get("skip_ntp_configuration", False):
            ntp_cmd = (f"oc --kubeconfig {kubeconfig} "
                       f"create -f {constants.NTP_CHRONY_CONF}")
            logger.info("Creating NTP chrony")
            run_cmd(ntp_cmd)
        # sleep here to give the machineconfigpool status update time to start
        time.sleep(60)
        wait_for_machineconfigpool_status("all")
Example #11
def list_clusters(provider=None):
    """
    Returns info about the clusters, taken from the ibmcloud command.

    Args:
        provider (str): Provider type (classic, vpc-classic, vpc-gen2).

    Returns:
        list: List of cluster dicts parsed from the ibmcloud JSON output

    """
    cmd = "ibmcloud ks clusters -s -json"
    if provider:
        cmd += f" --provider {provider}"
    out = run_cmd(cmd)
    return json.loads(out)
Example #12
    def restart_nodes(self, nodes, timeout=900, wait=True):
        """
        Reboot the nodes on IBM Cloud.

        Args:
            nodes (list): The worker node instances
            timeout (int): Unused in this implementation
            wait (bool): Unused in this implementation

        """
        logger.info("restarting nodes")
        login()
        provider_id = nodes[0].get()["spec"]["providerID"]
        cluster_id = provider_id.split("/")[5]

        cmd = f"ibmcloud ks workers --cluster {cluster_id} --output json"
        out = run_cmd(cmd)
        worker_nodes = json.loads(out)

        if worker_nodes:
            for node in worker_nodes:
                cmd = f"ibmcloud ks worker reboot --cluster {cluster_id} --worker {node['id']} -f"
                out = run_cmd(cmd)
                logger.info(f"Node restart command output: {out}")
Example #13
def list_cluster():
    """
    Returns info about the OpenShift Dedicated clusters, taken from the
    OCM command.

    Returns:
        list: List of [name, state] pairs, one per cluster

    """
    cmd = "ocm list clusters --columns name,state"
    out = run_cmd(cmd)
    result = out.strip().split("\n")
    cluster_list = []
    for each_line in result[1:]:
        name, state = each_line.split()
        cluster_list.append([name, state])
    return cluster_list
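The parsing assumes one header row followed by whitespace-separated name/state
pairs. A quick illustration with made-up ocm output:

# Made-up `ocm list clusters --columns name,state` output.
out = "NAME        STATE\ncluster-a   ready\ncluster-b   installing\n"
result = out.strip().split("\n")
print([line.split() for line in result[1:]])
# [['cluster-a', 'ready'], ['cluster-b', 'installing']]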
Example #14
def _get_disk_by_id(worker):
    """
    Retrieve disk by-id on a worker node using the debug pod

    Args:
        worker (str): Worker node to get disks by-id for

    Returns:
        str: stdout of disk by-id command

    """
    cmd = f"oc debug nodes/{worker} " f"-- chroot /host ls -la /dev/disk/by-id/"
    return run_cmd(cmd)
Example #15
    def create_catalog_source_yaml(self):
        """
        Create the catalog source and OLM resources from their YAML files

        """
        try:
            catalog_source_data = templating.load_yaml(
                constants.CATALOG_SOURCE_YAML)
            image = config.DEPLOYMENT.get(
                "ocs_registry_image",
                config.DEPLOYMENT["default_ocs_registry_image"])
            catalog_source_data["spec"]["image"] = image
            catalog_source_manifest = tempfile.NamedTemporaryFile(
                mode="w+", prefix="catalog_source_manifest", delete=False)
            templating.dump_data_to_temp_yaml(catalog_source_data,
                                              catalog_source_manifest.name)
            run_cmd(f"oc create -f {catalog_source_manifest.name}",
                    timeout=300)
            run_cmd(f"oc create -f {constants.OLM_YAML}", timeout=300)
            time.sleep(60)
        except Exception as e:
            logger.info(e)
Example #16
    def login_as_sa(self):
        """
        Logs in as system:admin

        Returns:
            str: output of login command
        """
        kubeconfig = os.getenv('KUBECONFIG')
        command = f"oc login -u system:admin "
        if kubeconfig:
            command += f"--kubeconfig {kubeconfig}"
        status = run_cmd(command)
        return status
Example #17
def enable_route_and_create_ca_for_registry_access():
    """
    Enable the registry default route and create the CA certificate, then
    copy it to the respective location for registry access

    Raises:
        AssertionError: When failure in enabling registry default route

    """
    ocp_obj = ocp.OCP(kind=constants.CONFIG,
                      namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE)
    assert ocp_obj.patch(
        resource_name=constants.IMAGE_REGISTRY_RESOURCE_NAME,
        params='{"spec": {"defaultRoute": true}}',
        format_type="merge",
    ), "Registry pod defaultRoute enable is not success"
    logger.info("Enabled defaultRoute to true")
    ocp_obj = ocp.OCP()
    crt_cmd = (f"get secret {constants.DEFAULT_ROUTE_CRT} "
               f"-n {constants.OPENSHIFT_INGRESS_NAMESPACE} -o yaml")
    crt_dict = ocp_obj.exec_oc_cmd(command=crt_cmd)
    crt = crt_dict.get("data").get("tls.crt")
    route = get_default_route_name()
    if not os.path.exists("/tmp/secret"):
        run_cmd(cmd="mkdir /tmp/secret")
    with open(f"/tmp/secret/{route}.crt", "wb") as temp:
        temp.write(base64.b64decode(crt))
    master_list = node.get_master_nodes()
    ocp.rsync(
        src="/tmp/secret/",
        dst="/etc/pki/ca-trust/source/anchors",
        node=master_list[0],
        dst_node=True,
    )
    ocp_obj.exec_oc_debug_cmd(node=master_list[0],
                              cmd_list=["update-ca-trust enable"])
    logger.info(
        "Created base64 secret, copied to source location and enabled ca-trust"
    )
Example #18
    def create_obc(self):
        """
        OBC creation for RGW and NooBaa

        """
        if config.ENV_DATA["platform"] in constants.ON_PREM_PLATFORMS:
            obc_rgw = templating.load_yaml(constants.RGW_OBC_YAML)
            obc_rgw_data_yaml = tempfile.NamedTemporaryFile(
                mode="w+", prefix="obc_rgw_data", delete=False)
            templating.dump_data_to_temp_yaml(obc_rgw, obc_rgw_data_yaml.name)
            logger.info("Creating OBC for rgw")
            run_cmd(f"oc create -f {obc_rgw_data_yaml.name}", timeout=2400)
            self.obc_rgw = obc_rgw["metadata"]["name"]

        obc_nooba = templating.load_yaml(constants.MCG_OBC_YAML)
        obc_mcg_data_yaml = tempfile.NamedTemporaryFile(mode="w+",
                                                        prefix="obc_mcg_data",
                                                        delete=False)
        templating.dump_data_to_temp_yaml(obc_nooba, obc_mcg_data_yaml.name)
        logger.info("create OBC for mcg")
        run_cmd(f"oc create -f {obc_mcg_data_yaml.name}", timeout=2400)
        self.obc_mcg = obc_nooba["metadata"]["name"]
Example #19
    def upload_helpers(self, ocp_repo):
        """
        Upload helper files to pod for OCP installation on RHEL
        Helper Files::

            - ssh_key pem
            - ocp repo
            - ocp pem
            - kubeconfig
            - pull secret
            - inventory yaml

        Args:
            ocp_repo (str): OCP repo to upload

        """
        upload(self.pod_name, self.ssh_key_pem, constants.POD_UPLOADPATH)
        upload(self.pod_name, ocp_repo, constants.YUM_REPOS_PATH)
        upload(self.pod_name, self.ops_mirror_pem, constants.PEM_PATH)
        upload(self.pod_name, self.kubeconfig, constants.POD_UPLOADPATH)
        upload(self.pod_name, self.pull_secret_path, constants.POD_UPLOADPATH)
        if config.ENV_DATA['folder_structure']:
            inventory_yaml_haproxy = self.create_inventory_for_haproxy()
            upload(
                self.pod_name,
                inventory_yaml_haproxy,
                constants.POD_UPLOADPATH
            )
            cmd = (
                f"ansible-playbook -i {inventory_yaml_haproxy} "
                f"{self.haproxy_playbook} --private-key={self.ssh_key_pem} -v"
            )
            run_cmd(cmd)
        self.inventory_yaml = self.create_inventory()
        upload(
            self.pod_name,
            self.inventory_yaml,
            constants.POD_UPLOADPATH
        )
Example #20
    def exec_oc_cmd(self,
                    command,
                    out_yaml_format=True,
                    secrets=None,
                    timeout=600,
                    ignore_error=False,
                    **kwargs):
        """
        Executing 'oc' command

        Args:
            command (str): The command to execute (e.g. create -f file.yaml)
                without the initial 'oc' at the beginning
            out_yaml_format (bool): whether to return a yaml-loaded python
                object or the raw output
            secrets (list): A list of secrets to be masked with asterisks
                This kwarg is popped in order to not interfere with
                subprocess.run(``**kwargs``)
            timeout (int): timeout for the oc_cmd, defaults to 600 seconds
            ignore_error (bool): True to ignore a non-zero return code and
                not raise the exception.

        Returns:
            dict: Dictionary represents a returned yaml file.
            str: If out_yaml_format is False.

        """
        oc_cmd = "oc "
        kubeconfig = os.getenv('KUBECONFIG')
        if self.namespace:
            oc_cmd += f"-n {self.namespace} "

        if kubeconfig:
            oc_cmd += f"--kubeconfig {kubeconfig} "

        oc_cmd += command
        out = run_cmd(cmd=oc_cmd,
                      secrets=secrets,
                      timeout=timeout,
                      ignore_error=ignore_error,
                      **kwargs)

        try:
            if out.startswith('hints = '):
                out = out[out.index('{'):]
        except ValueError:
            pass

        if out_yaml_format:
            return yaml.safe_load(out)
        return out
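Typical call sites pass the command without the leading 'oc' and get a parsed
YAML object back by default. A hedged usage sketch (the namespace and pod name
are made up):

# Usage sketch only; resource names are hypothetical.
ocp_obj = OCP(namespace="openshift-storage")
pod_dict = ocp_obj.exec_oc_cmd("get pod my-pod -o yaml")              # dict
raw_logs = ocp_obj.exec_oc_cmd("logs my-pod", out_yaml_format=False)  # str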
Example #21
def gather_bootstrap():
    """
    Gather debugging data for a failing-to-bootstrap control plane.
    Data is placed in the `gather_bootstrap` directory under the log directory.

    Raises:
        NodeNotFoundError: If we are unable to retrieve the IP of any master
            nodes

    """
    logger.info("Running gather bootstrap")
    gather_bootstrap_dir = os.path.expanduser(os.path.join(
        config.RUN['log_dir'], 'gather_bootstrap'
    ))
    openshift_install = os.path.join(
        config.RUN.get('bin_dir'),
        'openshift-install'
    )
    ssh_key = os.path.expanduser(config.DEPLOYMENT.get('ssh_key_private'))
    data = get_gather_bootstrap_node_data()
    bootstrap_ip = data['bootstrap_ip']
    logger.debug('Bootstrap IP: %s', bootstrap_ip)
    master_ips = data['master_ips']
    logger.debug('Master IPs: %s', master_ips)
    cmd = (
        f"{openshift_install} gather bootstrap --bootstrap {bootstrap_ip} "
        f"--dir {gather_bootstrap_dir} --log-level debug --key {ssh_key} "
    )
    if len(master_ips) == 0:
        logger.warning(
            'No master IPs were found. '
            'Adding master `None` so we still gather logs from bootstrap node'
        )
        cmd += "--master None "
    else:
        for master in master_ips:
            cmd += f"--master {master} "
    run_cmd(cmd)
Example #22
    def cluster_filler(self):
        """
        Fill the cluster to the required capacity by concurrently copying
        a downloaded file onto the filler pods.

        Returns:
            bool: True once the cluster is filled to the expected capacity

        """
        curl_cmd = (
            f"curl {constants.REMOTE_FILE_URL} --output {constants.FILE_PATH}"
        )
        logging.info("Downloading the filler file...")
        run_cmd(cmd=curl_cmd)
        logging.info("Download finished")
        with ThreadPoolExecutor() as executor:
            for pod in self.pods_to_fill:
                executor.submit(
                    pod_helpers.upload,
                    pod.name,
                    constants.FILE_PATH,
                    "/mnt/",
                    namespace=self.namespace,
                )
                logging.info(f"### initiated downloader for {pod.name}")

        filler_executor = ThreadPoolExecutor()
        while not self.cluster_filled:
            for copy_iter in range(self.concurrent_copies):
                for each_pod in self.pods_to_fill:
                    self.used_capacity = get_percent_used_capacity()
                    logging.info(
                        f"### used capacity percentage = {self.used_capacity}")
                    if self.used_capacity <= self.percent_required_filled:
                        filler_executor.submit(self.filler, each_pod)
                        logging.info(
                            f"#### Ran copy operation on pod {each_pod.name}. copy_iter # {copy_iter}"
                        )
                    else:
                        logging.info(
                            f"############ Cluster filled to the expected capacity "
                            f"{self.percent_required_filled}")
                        self.cluster_filled = True
                        break
                if self.cluster_filled:
                    return True
Example #23
    def upload_helpers(self, ocp_repo):
        """
        Upload helper files to pod for OCP installation on RHEL
        Helper Files::

            - ssh_key pem
            - ocp repo
            - ocp pem
            - kubeconfig
            - pull secret
            - inventory yaml

        Args:
            ocp_repo (str): OCP repo to upload

        """
        upload(self.pod_name, self.ssh_key_pem, constants.POD_UPLOADPATH)
        upload(self.pod_name, ocp_repo, constants.YUM_REPOS_PATH)
        # prepare and copy credential files for mirror.openshift.com
        with prepare_mirror_openshift_credential_files() as (
                mirror_user_file,
                mirror_password_file,
        ):
            upload(self.pod_name, mirror_user_file, constants.YUM_VARS_PATH)
            upload(self.pod_name, mirror_password_file,
                   constants.YUM_VARS_PATH)
        upload(self.pod_name, self.kubeconfig, constants.POD_UPLOADPATH)
        upload(self.pod_name, self.pull_secret_path, constants.POD_UPLOADPATH)
        if config.ENV_DATA["folder_structure"]:
            inventory_yaml_haproxy = self.create_inventory_for_haproxy()
            upload(self.pod_name, inventory_yaml_haproxy,
                   constants.POD_UPLOADPATH)
            cmd = (
                f"ansible-playbook -i {inventory_yaml_haproxy} "
                f"{self.haproxy_playbook} --private-key={self.ssh_key_pem} -v")
            run_cmd(cmd)
        self.inventory_yaml = self.create_inventory()
        upload(self.pod_name, self.inventory_yaml, constants.POD_UPLOADPATH)
Example #24
def svt_cleanup():
    """
    Removes the cloned SVT project, the virtual environment, and projects
    created while running SVT

    Raises:
        BaseException: In case any errors occurred while removing the project and env.

    Returns:
        bool: True if no exceptions occurred, False otherwise

    """
    ns_obj = ocp.OCP(kind="namespace")
    try:
        shutil.rmtree("/tmp/svt")
        shutil.rmtree("/tmp/venv")
    except BaseException:
        log.error("Error while cleaning SVT project")

    try:
        project_list = [
            "cakephp-mysql0",
            "dancer-mysql0",
            "django-postgresql0",
            "eap64-mysql0",
            "nodejs-mongodb0",
            "rails-postgresql0",
            "tomcat8-mongodb0",
        ]
        # Reset namespace to default
        ocp.switch_to_default_rook_cluster_project()
        for project in project_list:
            run_cmd(f"oc delete project {project}")
            ns_obj.wait_for_delete(resource_name=project)

        return True
    except Exception:
        return False
Example #25
    def test_sql_workload_simple(self, ripsaw):
        """
        This is a basic pgsql workload
        """
        # Deploy postgres
        log.info("Deploying postgres database")
        ripsaw.apply_crd('resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml')
        ripsaw.setup_postgresql()
        run_cmd('bin/oc wait --for condition=ready pod '
                '-l app=postgres '
                '--timeout=120s')

        # Create pgbench benchmark
        log.info("Create resource file for pgbench workload")
        pg_data = templating.load_yaml_to_dict(constants.PGSQL_BENCHMARK_YAML)
        pg_obj = OCS(**pg_data)
        pg_obj.create()
        # Wait for pgbench pod to be created
        log.info("waiting for pgbench benchmark to create, "
                 f"PGbench pod name: {pg_obj.name} ")
        wait_time = 30
        log.info(f"Waiting {wait_time} seconds...")
        time.sleep(wait_time)

        pgbench_pod = run_cmd('bin/oc get pods -l '
                              'app=pgbench-client -o name')
        pgbench_pod = pgbench_pod.split('/')[1]
        run_cmd('bin/oc wait --for condition=Initialized '
                f'pods/{pgbench_pod} '
                '--timeout=60s')
        run_cmd('bin/oc wait --for condition=Complete jobs '
                '-l app=pgbench-client '
                '--timeout=300s')

        # Running pgbench and parsing logs
        output = run_cmd(f'bin/oc logs {pgbench_pod}')
        pg_output = utils.parse_pgsql_logs(output)
        log.info("*******PGBench output log*********\n" f"{pg_output}")
        for data in pg_output:
            latency_avg = data['latency_avg']
            if not latency_avg:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info("PGBench has completed successfully")

        # Clean up pgbench benchmark
        log.info("Deleting PG bench benchmark:")
        pg_obj.delete()
Example #26
def validate_cluster_on_pvc():
    """
    Validate creation of PVCs for MON and OSD pods.
    Also validate that those PVCs are attached to the OCS pods

    Raises:
         AssertionError: If PVC is not mounted on one or more OCS pods

    """
    # Get the PVCs for selected label (MON/OSD)
    ns = config.ENV_DATA['cluster_namespace']
    ocs_pvc_obj = get_all_pvc_objs(namespace=ns)

    # Check all PVCs are in Bound state

    pvc_names = []
    for pvc_obj in ocs_pvc_obj:
        if (pvc_obj.name.startswith(constants.DEFAULT_DEVICESET_PVC_NAME)
                or pvc_obj.name.startswith(constants.DEFAULT_MON_PVC_NAME)):
            assert pvc_obj.status == constants.STATUS_BOUND, (
                f"PVC {pvc_obj.name} is not Bound"
            )
            logger.info(f"PVC {pvc_obj.name} is in Bound state")
            pvc_names.append(pvc_obj.name)

    mon_pods = get_pod_name_by_pattern('rook-ceph-mon', ns)
    if not config.DEPLOYMENT.get('local_storage'):
        logger.info("Validating all mon pods have PVC")
        validate_ocs_pods_on_pvc(mon_pods, pvc_names)
    else:
        logger.debug(
            "Skipping validation that all mon pods have PVC because in LSO "
            "deployment mon pods are not backed by PVCs"
        )
    logger.info("Validating all osd pods have PVC")
    osd_deviceset_pods = get_pod_name_by_pattern(
        'rook-ceph-osd-prepare-ocs-deviceset', ns
    )
    validate_ocs_pods_on_pvc(osd_deviceset_pods, pvc_names)
    osd_pods = get_pod_name_by_pattern('rook-ceph-osd', ns, filter='prepare')
    for ceph_pod in mon_pods + osd_pods:
        out = run_cmd(f'oc -n {ns} get pods {ceph_pod} -o yaml')
        out_yaml = yaml.safe_load(out)
        for vol in out_yaml['spec']['volumes']:
            if vol.get('persistentVolumeClaim'):
                claimName = vol.get('persistentVolumeClaim').get('claimName')
                logger.info(f"{ceph_pod} backed by pvc {claimName}")
                assert claimName in pvc_names, (
                    "Ceph Internal Volume not backed by PVC"
                )
Example #27
def get_rook_version():
    """
    Get the rook image information from rook-ceph-operator pod

    Returns:
        str: rook version

    """
    namespace = ocsci_config.ENV_DATA["cluster_namespace"]
    rook_operator = get_pod_name_by_pattern("rook-ceph-operator", namespace)
    out = run_cmd(f"oc -n {namespace} get pods {rook_operator[0]} -o yaml", )
    version = yaml.safe_load(out)
    rook_version = version["spec"]["containers"][0]["image"]
    return rook_version
Example #28
    def login(self, user, password):
        """
        Logs user in

        Args:
            user (str): Name of user to be logged in
            password (str): Password of user to be logged in

        Returns:
            str: output of login command
        """
        command = f"oc login -u {user} -p {password}"
        status = run_cmd(command)
        return status
Example #29
    def new_project(self, project_name):
        """
        Creates a new project

        Args:
            project_name (str): Name of the project to be created

        Returns:
            bool: True in case project creation succeeded, False otherwise
        """
        command = f"oc new-project {project_name}"
        if f'Now using project "{project_name}"' in run_cmd(command):
            return True
        return False
Example #30
    def deploy_ocp(self, log_cli_level='DEBUG'):
        """
        Base deployment steps, the rest should be implemented in the child
        class.

        Args:
            log_cli_level (str): log level for installer (default: DEBUG)
        """
        self.ocp_deployment = self.OCPDeployment()
        self.ocp_deployment.deploy_prereq()
        self.ocp_deployment.deploy(log_cli_level)
        # logging the cluster UUID so that we can ask for its telemetry data
        cluster_id = run_cmd(
            "oc get clusterversion version -o jsonpath='{.spec.clusterID}'"
        )
        logger.info(f"clusterID (UUID): {cluster_id}")