Example #1
    def make_directory(self):
        """
        Check if the directory to store must-gather logs already exists
        and use a new directory if so.

        Returns:
            str: Logs directory

        """
        index = 1
        directory = ocsci_log_path()
        while os.path.isdir(directory + "_ocs_logs"):
            index += 1
            directory = ocsci_log_path() + f"_{index}"

        return directory
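
The collision-avoidance loop above can be exercised standalone. A minimal sketch, with unique_log_directory and the base path as illustrative stand-ins for the method and ocsci_log_path():

import os

def unique_log_directory(base_path):
    # Mirror the check above: keep bumping the numeric suffix until no
    # matching "<dir>_ocs_logs" directory exists.
    index = 1
    directory = base_path
    while os.path.isdir(directory + "_ocs_logs"):
        index += 1
        directory = f"{base_path}_{index}"
    return directory

print(unique_log_directory("/tmp/ocs-ci-logs"))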
Example #2
def main(arguments):
    init_ocsci_conf(arguments)
    pytest_logs_dir = utils.ocsci_log_path()
    utils.create_directory_path(framework.config.RUN['log_dir'])
    arguments.extend([
        '-p', 'ocs_ci.framework.pytest_customization.ocscilib',
        '-p', 'ocs_ci.framework.pytest_customization.marks',
        '-p', 'ocs_ci.framework.pytest_customization.reports',
        '--logger-logsdir', pytest_logs_dir,
    ])
    return pytest.main(arguments)
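
A typical console entry point for such a main() would forward sys.argv and propagate pytest's exit code to the shell; the __main__ guard below is a hypothetical wrapper, not part of the snippet above:

import sys

if __name__ == "__main__":
    # Forward CLI arguments and exit with pytest's return code.
    sys.exit(main(sys.argv[1:]))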
Example #3
    def setup(self):
        """
        Setting up the environment for each performance and scale test

        Args:
            name (str): The test name that will use in the performance dashboard
        """
        log.info("Setting up test environment")
        self.es = None  # placeholder for the in-cluster Elasticsearch deployment
        self.es_backup = None  # placeholder for the Elasticsearch backup
        self.main_es = None  # placeholder for the main Elasticsearch object
        self.benchmark_obj = None  # placeholder for the benchmark object
        self.client_pod = None  # placeholder for the client pod object
        self.dev_mode = config.RUN["cli_params"].get("dev_mode")
        self.pod_obj = OCP(kind="pod", namespace=benchmark_operator.BMO_NAME)
        self.initialize_test_crd()

        # Placeholder for the test results file (all sub-tests together)
        self.results_file = ""

        # All tests need a uuid for the ES results; the benchmark-operator base
        # test will overwrite it with the uuid pulled from the benchmark pod
        self.uuid = uuid4().hex

        # Getting the full path for the test logs
        self.full_log_path = os.environ.get("PYTEST_CURRENT_TEST").split(" ")[0]
        self.full_log_path = (
            self.full_log_path.replace("::", "/").replace("[", "-").replace("]", "")
        )
        self.full_log_path = os.path.join(ocsci_log_path(), self.full_log_path)
        log.info(f"Logs file path name is : {self.full_log_path}")

        # Getting the results path as a list
        self.results_path = self.full_log_path.split("/")
        self.results_path.pop()

        # List of test(s) for checking the results
        self.workloads = []

        # Collecting all Environment configuration Software & Hardware
        # for the performance report.
        self.environment = get_environment_info()
        self.environment["clusterID"] = get_running_cluster_id()

        self.ceph_cluster = CephCluster()
        self.used_capacity = self.get_cephfs_data()

        self.get_osd_info()

        self.get_node_info(node_type="master")
        self.get_node_info(node_type="worker")
Example #4
    def make_directory(self):
        """
        Check if the directory that contains must-gather logs already exists
        and use a new directory if so.

        Returns:
            str: Logs directory

        """
        index = 1
        directory = ocsci_log_path()
        while os.path.isdir(directory + "_ocs_logs"):
            index += 1
            directory = ocsci_log_path() + f"_{index}"

        try:
            os.makedirs(directory, exist_ok=True)
            logger.info(f'Directory created successfully '
                        f'in path {directory}')
            return directory
        except OSError:
            logger.error("Failed to create logs directory")
            raise
Example #5
def main(argv=None):
    arguments = argv or sys.argv[1:]
    init_ocsci_conf(arguments)
    for i in range(framework.config.nclusters):
        framework.config.switch_ctx(i)
        pytest_logs_dir = utils.ocsci_log_path()
        utils.create_directory_path(framework.config.RUN["log_dir"])
    arguments.extend([
        "-p",
        "ocs_ci.framework.pytest_customization.ocscilib",
        "-p",
        "ocs_ci.framework.pytest_customization.marks",
        "-p",
        "ocs_ci.framework.pytest_customization.reports",
        "--logger-logsdir",
        pytest_logs_dir,
    ])
    return pytest.main(arguments)
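
Note that pytest_logs_dir is reassigned on every iteration of the switch_ctx loop, so the value handed to --logger-logsdir is the one computed for the last cluster context. A minimal illustration with stand-in values:

# Stand-ins for framework.config.nclusters and utils.ocsci_log_path().
for i in range(3):
    pytest_logs_dir = f"/logs/cluster-{i}"
print(pytest_logs_dir)  # /logs/cluster-2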
Example #6
def main(argv=None):
    arguments = argv or sys.argv[1:]
    init_ocsci_conf(arguments)
    pytest_logs_dir = utils.ocsci_log_path()
    utils.create_directory_path(framework.config.RUN['log_dir'])
    launch_name = utils.get_testrun_name() + getuser()
    arguments.extend([
        '-p',
        'ocs_ci.framework.pytest_customization.ocscilib',
        '-p',
        'ocs_ci.framework.pytest_customization.marks',
        '-p',
        'ocs_ci.framework.pytest_customization.reports',
        '--logger-logsdir',
        pytest_logs_dir,
        '--rp-launch',
        launch_name,
    ])
    return pytest.main(arguments)
Example #7
def main(argv=None):
    arguments = argv or sys.argv[1:]
    init_ocsci_conf(arguments)
    pytest_logs_dir = utils.ocsci_log_path()
    utils.create_directory_path(framework.config.RUN["log_dir"])
    launch_name = f"{utils.get_testrun_name()}-{getuser()}"
    arguments.extend([
        "-p",
        "ocs_ci.framework.pytest_customization.ocscilib",
        "-p",
        "ocs_ci.framework.pytest_customization.marks",
        "-p",
        "ocs_ci.framework.pytest_customization.reports",
        "--logger-logsdir",
        pytest_logs_dir,
        "--rp-launch",
        launch_name,
    ])
    return pytest.main(arguments)
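
The only functional difference from Example #6 is the separator in the launch name; with stand-in values:

from getpass import getuser

testrun_name = "ocs-ci"  # stand-in for utils.get_testrun_name()
print(testrun_name + getuser())       # Example #6 style: e.g. "ocs-cialice"
print(f"{testrun_name}-{getuser()}")  # Example #7 style: e.g. "ocs-ci-alice"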
Example #8
)
from ocs_ci.utility.utils import ocsci_log_path
from ocs_ci.utility import utils, templating
from ocs_ci.ocs.exceptions import UnexpectedBehaviour

log = logging.getLogger(__name__)

# Namespace and Noobaa storage class
namespace = constants.OPENSHIFT_STORAGE_NAMESPACE
sc_name = constants.NOOBAA_SC
# Total number of OBCs to scale to
scale_obc_count = 500
# Number of OBCs to create per batch
num_obc_batch = 50
# Scale data file
log_path = ocsci_log_path()
obc_scaled_data_file = f"{log_path}/obc_scale_data_file.yaml"


@pre_upgrade
@skipif_external_mode
@skipif_bm
@pytest.mark.polarion_id("OCS-3987")
def test_scale_obc_pre_upgrade(tmp_path, timeout=60):
    """
    Create scaled MCG OBCs using the Noobaa storage class before upgrade.
    Save the scaled OBC data in a file for post-upgrade validation.
    """
    obc_scaled_list = []
    log.info(f"Start creating  {scale_obc_count} " f"OBC in a batch of {num_obc_batch}")
    for i in range(int(scale_obc_count / num_obc_batch)):
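
With the constants above, the (truncated) batch loop runs scale_obc_count / num_obc_batch = 10 iterations, creating num_obc_batch OBCs per iteration; a quick check of the arithmetic:

scale_obc_count, num_obc_batch = 500, 50
batches = int(scale_obc_count / num_obc_batch)
print(batches)  # 10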
Example #9
    def test_vdbench_workload(self, template, with_ocs, load, label_nodes,
                              ripsaw, servers, threads, blocksize, fileio,
                              samples, width, depth, files, file_size, runtime,
                              pause):
        """
        Run VDBench Workload

        Args:
            template (str): Name of the yaml file that will be used as a template
            with_ocs (bool): Whether the test will run on the same nodes as OCS
            load (int): Load to run on the storage, as a percentage of the capacity
            label_nodes (fixture): Fixture that labels the worker(s) to be
                                   used for the app pod(s)
            ripsaw (fixture): Fixture to deploy the ripsaw benchmarking operator
            servers (int): Number of servers (pods) that will run the IO
            threads (int): Number of threads that will run on each server
            blocksize (list of str): List of block sizes - the 'K' suffix must be included
            fileio (str): How to select files for the IO: random / sequential
            samples (int): Number of times to run each test
            width (int): Width of the directory tree to create
            depth (int): Depth of the directory tree to create
            files (int): Number of files to create in each directory
            file_size (int): File size (in MB) to create
            runtime (int): Time (in seconds) for each test iteration
            pause (int): Time (in minutes) to pause between test iterations
        """
        log.info(f'Going to use {template} as template')
        log.info("Apply Operator CRD")

        crd = 'resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml'
        ripsaw.apply_crd(crd)

        log.info('Running vdbench benchmark')
        if template:
            template = os.path.join(constants.TEMPLATE_VDBENCH_DIR, template)
        else:
            template = constants.VDBENCH_BENCHMARK_YAML
        sf_data = templating.load_yaml(template)

        target_results = template + 'Results'

        log.info('Calculating storage size...')
        ceph_cluster = CephCluster()
        total_capacity = ceph_cluster.get_ceph_capacity()
        assert total_capacity > constants.VDBENCH_MIN_CAPACITY, (
            "Storage capacity is too low for performance testing")
        log.info(f'The Total usable capacity is {total_capacity}')

        if load:
            width = constants.VDBENCH_WIDTH
            depth = constants.VDBENCH_DEPTH
            file_size = constants.VDBENCH_FILE_SIZE
            capacity_per_pod = constants.VDBENCH_CAP_PER_POD
            total_dirs = width**depth
            log.info(f'The total number of dirs in the tree is {total_dirs}')
            log.info(f'Going to run with {load}% of the capacity load.')
            tested_capacity = round(total_capacity * 1024 * load / 100)
            log.info(f'Tested capacity is {tested_capacity} MB')
            servers = round(tested_capacity / capacity_per_pod)
            """
            To spread the application pods evenly on all workers or application nodes and at least 2 app pods
            per node.
            """
            nodes = len(
                node.get_typed_nodes(node_type=constants.WORKER_MACHINE))
            if not with_ocs:
                nodes = len(
                    machine.get_labeled_nodes(
                        f'node-role.kubernetes.io/app={constants.APP_NODE_LABEL}'
                    ))
            log.info(f'Going to use {nodes} nodes for the test!')
            servers = round(servers / nodes) * nodes
            if servers < (nodes * 2):
                servers = nodes * 2

            files = round(tested_capacity / servers / total_dirs)
            total_files = round(files * servers * total_dirs)
            log.info(f'Number of pods is {servers}')
            log.info(f'Going to create {total_files} files!')
            log.info(f'Number of files per dir is {files}')
        """
            Setting up the parameters for this test
        """
        if servers:
            sf_data['spec']['workload']['args']['servers'] = servers
            target_results = target_results + '-' + str(servers)
        if threads:
            sf_data['spec']['workload']['args']['threads'] = threads
            target_results = target_results + '-' + str(threads)
        if fileio:
            sf_data['spec']['workload']['args']['fileio'] = fileio
            target_results = target_results + '-' + str(fileio)
        if samples:
            sf_data['spec']['workload']['args']['samples'] = samples
            target_results = target_results + '-' + str(samples)
        if width:
            sf_data['spec']['workload']['args']['width'] = width
            target_results = target_results + '-' + str(width)
        if depth:
            sf_data['spec']['workload']['args']['depth'] = depth
            target_results = target_results + '-' + str(depth)
        if files:
            sf_data['spec']['workload']['args']['files'] = files
            target_results = target_results + '-' + str(files)
        if file_size:
            sf_data['spec']['workload']['args']['file_size'] = file_size
            target_results = target_results + '-' + str(file_size)
        if runtime:
            sf_data['spec']['workload']['args']['runtime'] = runtime
            target_results = target_results + '-' + str(runtime)
        if pause:
            sf_data['spec']['workload']['args']['pause'] = pause
            target_results = target_results + '-' + str(pause)
        if len(blocksize) > 0:
            sf_data['spec']['workload']['args']['bs'] = blocksize
            target_results = target_results + '-' + '_'.join(blocksize)
        if with_ocs:
            if sf_data['spec']['workload']['args']['pin_server']:
                del sf_data['spec']['workload']['args']['pin_server']
        """
            Calculating the size of the volume that need to be test, it should
            be at least twice in the size then the size of the files, and at
            least 100Gi.
            since the file_size is in Kb and the vol_size need to be in Gb,
            more calculation is needed.
        """
        vol_size = int((files * total_dirs) * file_size * 1.3)
        log.info('Number of files to create: {}'.format(
            int(files * (width**depth))))
        log.info(f'The size of all files is: {vol_size} MB')
        vol_size = int(vol_size / 1024)
        if vol_size < 100:
            vol_size = 100
        sf_data['spec']['workload']['args']['storagesize'] = f'{vol_size}Gi'

        log.debug(f'Output of the configuration file is {sf_data}')

        timeout = 86400  # 3600 (1 hour) * 24 = one day

        sf_obj = OCS(**sf_data)
        sf_obj.create()
        # wait for benchmark pods to get created - takes a while
        for bench_pod in TimeoutSampler(300, 10, get_pod_name_by_pattern,
                                        'vdbench-client', 'my-ripsaw'):
            try:
                if bench_pod[0] is not None:
                    vdbench_client_pod = bench_pod[0]
                    break
            except IndexError:
                log.info('Benchmark client pod not ready yet')

        bench_pod = OCP(kind='pod', namespace='my-ripsaw')
        log.info('Waiting for VDBench benchmark to run')
        assert bench_pod.wait_for_resource(condition=constants.STATUS_RUNNING,
                                           resource_name=vdbench_client_pod,
                                           sleep=30,
                                           timeout=600)
        start_time = time.time()
        while True:
            logs = bench_pod.exec_oc_cmd(f'logs {vdbench_client_pod}',
                                         out_yaml_format=False)
            if 'Test Run Finished' in logs:
                log.info('VdBench Benchmark Completed Successfully')
                break

            if timeout < (time.time() - start_time):
                raise TimeoutError(
                    'Timed out waiting for benchmark to complete')
            time.sleep(30)

        # Get the results file from the benchmark pod and put it with the
        # test logs.
        # TODO: find the place of the actual test log and not in the parent
        #       logs path
        target_results = '{}/{}.tgz'.format(ocsci_log_path(), target_results)
        pod_results = constants.VDBENCH_RESULTS_FILE
        retrive_files_from_pod(vdbench_client_pod, target_results, pod_results)
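
The repeated if-blocks that copy each parameter into sf_data and extend the target_results suffix could be collapsed into a data-driven helper. A sketch of that refactor, assuming an insertion-ordered dict that mirrors the original if-chain (apply_workload_params is an illustrative name, not an ocs-ci API):

def apply_workload_params(sf_data, target_results, params, blocksize=None):
    # Copy each non-empty parameter into the benchmark CR and extend the
    # results-file suffix, preserving the order of the original if-chain.
    workload_args = sf_data['spec']['workload']['args']
    for key, value in params.items():
        if value:
            workload_args[key] = value
            target_results = f"{target_results}-{value}"
    if blocksize:
        workload_args['bs'] = blocksize
        target_results = f"{target_results}-{'_'.join(blocksize)}"
    return target_results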
Example #10
    def create_multi_pvc_pod(self, pvc_count=760, pvcs_per_pod=20, obj_name="obj1"):
        """
        Function to create PVC of different type and attach them to PODs and start IO.

        Args:
            pvc_count (int): Number of PVCs to be created
            pvcs_per_pod (int): No of PVCs to be attached to single pod
            Example, If 20 then a POD will be created with 20PVCs attached
            obj_name (string): Object name prefix string
            tmp_path (pathlib.Path): Directory where a temporary yaml file will

        Returns:
            rbd_pvc_name (list): List all the rbd PVCs names created
            fs_pvc_name (list): List all the fs PVCs names created
            pod_running_list (list): List all the PODs names created

        """

        # Check the kube_job batch count: more than 750 PVCs per job can
        # lead to kube_job completion failures. The limit below is 1200
        # because the count is split in half, i.e. at most 600 per job.
        if pvc_count > 1200:
            raise UnexpectedBehaviour("Kube_job batch count should be less than 1200")

        logging.info(f"Start creating {pvc_count} PVC of 2 types RBD-RWO & FS-RWX")
        cephfs_sc_obj = constants.DEFAULT_STORAGECLASS_CEPHFS
        rbd_sc_obj = constants.DEFAULT_STORAGECLASS_RBD

        # Get pvc_dict_list, append all the pvc.yaml dict to pvc_dict_list
        rbd_pvc_dict_list, cephfs_pvc_dict_list = ([] for i in range(2))
        rbd_pvc_dict_list.extend(
            construct_pvc_creation_yaml_bulk_for_kube_job(
                no_of_pvc=int(pvc_count / 2),
                access_mode=constants.ACCESS_MODE_RWO,
                sc_name=rbd_sc_obj,
            )
        )
        cephfs_pvc_dict_list.extend(
            construct_pvc_creation_yaml_bulk_for_kube_job(
                no_of_pvc=int(pvc_count / 2),
                access_mode=constants.ACCESS_MODE_RWX,
                sc_name=cephfs_sc_obj,
            )
        )

        # kube_job for cephfs and rbd PVC creations
        lcl = locals()
        tmp_path = pathlib.Path(ocsci_log_path())
        lcl[f"rbd_pvc_kube_{obj_name}"] = ObjectConfFile(
            name=f"rbd_pvc_kube_{obj_name}",
            obj_dict_list=rbd_pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        lcl[f"cephfs_pvc_kube_{obj_name}"] = ObjectConfFile(
            name=f"cephfs_pvc_kube_{obj_name}",
            obj_dict_list=cephfs_pvc_dict_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )

        # Create kube_job for PVC creations
        lcl[f"rbd_pvc_kube_{obj_name}"].create(namespace=self.namespace)
        lcl[f"cephfs_pvc_kube_{obj_name}"].create(namespace=self.namespace)

        # Check all the PVC reached Bound state
        rbd_pvc_name = check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=lcl[f"rbd_pvc_kube_{obj_name}"],
            namespace=self.namespace,
            no_of_pvc=int(pvc_count / 2),
            timeout=60,
        )
        fs_pvc_name = check_all_pvc_reached_bound_state_in_kube_job(
            kube_job_obj=lcl[f"cephfs_pvc_kube_{obj_name}"],
            namespace=self.namespace,
            no_of_pvc=int(pvc_count / 2),
            timeout=60,
        )

        # Construct pod yaml file for kube_job
        pod_data_list = list()
        pod_data_list.extend(
            attach_multiple_pvc_to_pod_dict(
                pvc_list=rbd_pvc_name,
                namespace=self.namespace,
                pvcs_per_pod=pvcs_per_pod,
                deployment_config=True,
                node_selector=self.node_selector,
            )
        )
        pod_data_list.extend(
            attach_multiple_pvc_to_pod_dict(
                pvc_list=fs_pvc_name,
                namespace=self.namespace,
                pvcs_per_pod=pvcs_per_pod,
                deployment_config=True,
                node_selector=self.node_selector,
            )
        )

        # Create kube_job for pod creation
        lcl[f"pod_kube_{obj_name}"] = ObjectConfFile(
            name=f"pod_kube_{obj_name}",
            obj_dict_list=pod_data_list,
            project=self.namespace,
            tmp_path=tmp_path,
        )
        lcl[f"pod_kube_{obj_name}"].create(namespace=self.namespace)

        # Check all the POD reached Running state
        pod_running_list = check_all_pod_reached_running_state_in_kube_job(
            kube_job_obj=lcl[f"pod_kube_{obj_name}"],
            namespace=self.namespace,
            no_of_pod=len(pod_data_list),
            timeout=90,
        )

        # Record all the kube_job objects created; these lists are
        # used in cleanup
        self.kube_job_pvc_list.append(lcl[f"rbd_pvc_kube_{obj_name}"])
        self.kube_job_pvc_list.append(lcl[f"cephfs_pvc_kube_{obj_name}"])
        self.kube_job_pod_list.append(lcl[f"pod_kube_{obj_name}"])

        return rbd_pvc_name, fs_pvc_name, pod_running_list
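
A hypothetical call site, assuming the method belongs to a scale helper like the FioPodScale class instantiated in Example #12:

fioscale = FioPodScale(kind=constants.DEPLOYMENTCONFIG)
rbd_pvcs, fs_pvcs, pods = fioscale.create_multi_pvc_pod(
    pvc_count=100, pvcs_per_pod=10, obj_name="demo"
)
print(f"Created {len(pods)} pods over {len(rbd_pvcs) + len(fs_pvcs)} PVCs")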
Example #11
    def test_bulk_pod_attach_performance(self, teardown_factory, bulk_size):
        """
        Measures pods attachment time in bulk_size bulk

        Args:
            teardown_factory: A fixture used when we want a new resource that was created during the tests
                               to be removed in the teardown phase.
            bulk_size: Size of the bulk to be tested
        Returns:

        """
        # Getting the test start time
        test_start_time = PASTest.get_time()

        log.info(f"Start creating bulk of new {bulk_size} PVCs")

        pvc_objs, _ = helpers.create_multiple_pvcs(
            sc_name=self.sc_obj.name,
            namespace=self.namespace,
            number_of_pvc=bulk_size,
            size=self.pvc_size,
            burst=True,
        )

        for pvc_obj in pvc_objs:
            pvc_obj.reload()
            teardown_factory(pvc_obj)
        with ThreadPoolExecutor(max_workers=5) as executor:
            for pvc_obj in pvc_objs:
                executor.submit(helpers.wait_for_resource_state, pvc_obj,
                                constants.STATUS_BOUND)

                executor.submit(pvc_obj.reload)

        start_time = helpers.get_provision_time(self.interface,
                                                pvc_objs,
                                                status="start")
        end_time = helpers.get_provision_time(self.interface,
                                              pvc_objs,
                                              status="end")
        total_time = (end_time - start_time).total_seconds()
        log.info(
            f"{self.interface}: Bulk of {bulk_size} PVCs creation time is {total_time} seconds."
        )

        pvc_names_list = []
        for pvc_obj in pvc_objs:
            pvc_names_list.append(pvc_obj.name)

        log.info(f"{self.interface} : Before pod attach")
        bulk_start_time = time.time()
        pod_data_list = list()
        pod_data_list.extend(
            scale_lib.attach_multiple_pvc_to_pod_dict(
                pvc_list=pvc_names_list,
                namespace=self.namespace,
                pvcs_per_pod=1,
            ))

        lcl = locals()
        tmp_path = pathlib.Path(ocsci_log_path())
        obj_name = "obj1"
        # Create kube_job for pod creation
        lcl[f"pod_kube_{obj_name}"] = ObjectConfFile(
            name=f"pod_kube_{obj_name}",
            obj_dict_list=pod_data_list,
            project=defaults.ROOK_CLUSTER_NAMESPACE,
            tmp_path=tmp_path,
        )
        lcl[f"pod_kube_{obj_name}"].create(namespace=self.namespace)

        log.info("Checking that pods are running")
        # Check all the PODs reached Running state
        pod_running_list = scale_lib.check_all_pod_reached_running_state_in_kube_job(
            kube_job_obj=lcl[f"pod_kube_{obj_name}"],
            namespace=self.namespace,
            no_of_pod=len(pod_data_list),
            timeout=180,
        )
        for pod_name in pod_running_list:
            pod_obj = get_pod_obj(pod_name, self.namespace)
            teardown_factory(pod_obj)

        bulk_end_time = time.time()
        bulk_total_time = bulk_end_time - bulk_start_time
        log.info(
            f"Bulk attach time of {len(pod_running_list)} pods is {bulk_total_time} seconds"
        )

        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_log_path = get_full_test_logs_path(cname=self)
        full_log_path += f"-{self.sc}"
        full_results = self.init_full_results(
            ResultsAnalyse(self.uuid, self.crd_data, full_log_path))

        full_results.add_key("storageclass", self.sc)
        full_results.add_key("pod_bulk_attach_time", bulk_total_time)
        full_results.add_key("pvc_size", self.pvc_size)
        full_results.add_key("bulk_size", bulk_size)

        # Getting the test end time
        test_end_time = PASTest.get_time()

        # Add the test time to the ES report
        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })

        # Write the test results into the ES server
        full_results.es_write()
        # write the ES link to the test results in the test log.
        log.info(f"The result can be found at : {full_results.results_link()}")
Example #12
    def test_scale_pvcs_pods(self):
        """
        Scale 6000 PVCs and PODs in a cluster with 12 worker nodes
        """

        scale_count = 6000
        pvcs_per_pod = 20

        try:
            # Scale
            fioscale = FioPodScale(
                kind=constants.DEPLOYMENTCONFIG,
                node_selector=constants.SCALE_NODE_SELECTOR,
            )
            kube_pod_obj_list, kube_pvc_obj_list = fioscale.create_scale_pods(
                scale_count=scale_count, pvc_per_pod_count=pvcs_per_pod)

            namespace = fioscale.namespace
            scale_round_up_count = scale_count + 80

            # Get PVCs and PODs count and list
            pod_running_list, pvc_bound_list = ([], [])
            for pod_objs in kube_pod_obj_list:
                pod_running_list.extend(
                    scale_lib.check_all_pod_reached_running_state_in_kube_job(
                        kube_job_obj=pod_objs,
                        namespace=namespace,
                        no_of_pod=int(scale_round_up_count / 160),
                    ))
            for pvc_objs in kube_pvc_obj_list:
                pvc_bound_list.extend(
                    scale_lib.check_all_pvc_reached_bound_state_in_kube_job(
                        kube_job_obj=pvc_objs,
                        namespace=namespace,
                        no_of_pvc=int(scale_round_up_count / 16),
                    ))

            logging.info(f"Running PODs count {len(pod_running_list)} & "
                         f"Bound PVCs count {len(pvc_bound_list)} "
                         f"in namespace {fioscale.namespace}")

            # Get kube obj files in the list to update in scale_data_file
            pod_obj_file_list, pvc_obj_file_list = ([], [])
            files = os.listdir(ocsci_log_path())
            for f in files:
                if "pod" in f:
                    pod_obj_file_list.append(f)
                elif "pvc" in f:
                    pvc_obj_file_list.append(f)

            # Write namespace, PVC and POD data in a SCALE_DATA_FILE which
            # will be used during post_upgrade validation tests
            with open(SCALE_DATA_FILE, "a+") as w_obj:
                w_obj.write(str("# Scale Data File\n"))
                w_obj.write(str(f"NAMESPACE: {namespace}\n"))
                w_obj.write(str(f"POD_SCALE_LIST: {pod_running_list}\n"))
                w_obj.write(str(f"PVC_SCALE_LIST: {pvc_bound_list}\n"))
                w_obj.write(str(f"POD_OBJ_FILE_LIST: {pod_obj_file_list}\n"))
                w_obj.write(str(f"PVC_OBJ_FILE_LIST: {pvc_obj_file_list}\n"))

            # Check ceph health status
            utils.ceph_health_check(tries=30)

        except UnexpectedBehaviour:
            TestAddNode.skip_all = True
            logging.info(
                "Cluster is not in the expected state, unexpected behaviour")
            raise
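
A post-upgrade test can read SCALE_DATA_FILE back. Below is a sketch of such a reader (read_scale_data is an illustrative helper, not the actual ocs-ci parser); it relies on the list values above being written as Python literals:

import ast

def read_scale_data(path):
    data = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            key, _, value = line.partition(": ")
            try:
                # The POD/PVC lists round-trip through their Python repr.
                data[key] = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                data[key] = value  # e.g. the bare NAMESPACE string
    return data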
Example #13
    def test_vdbench_workload(self, template, with_ocs, label_nodes, ripsaw,
                              servers, threads, blocksize, fileio, samples,
                              width, depth, files, file_size, runtime, pause):
        """
        Run VDBench Workload

        Args:
            template (str): Name of the yaml file that will be used as a template
            with_ocs (bool): Whether the test will run on the same nodes as OCS
            label_nodes (fixture): Fixture that labels the worker(s) to be
                                   used for the app pod(s)
            ripsaw (fixture): Fixture to deploy the ripsaw benchmarking operator
            servers (int): Number of servers (pods) that will run the IO
            threads (int): Number of threads that will run on each server
            blocksize (list of str): List of block sizes - the 'K' suffix must be included
            fileio (str): How to select files for the IO: random / sequential
            samples (int): Number of times to run each test
            width (int): Width of the directory tree to create
            depth (int): Depth of the directory tree to create
            files (int): Number of files to create in each directory
            file_size (int): File size (in MB) to create
            runtime (int): Time (in seconds) for each test iteration
            pause (int): Time (in minutes) to pause between test iterations
        """
        log.info("Apply Operator CRD")

        crd = 'resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml'
        ripsaw.apply_crd(crd)

        log.info('Running vdbench benchmark')
        if template:
            template = os.path.join(constants.TEMPLATE_VDBENCH_DIR, template)
        else:
            template = constants.VDBENCH_BENCHMARK_YAML
        sf_data = templating.load_yaml(template)

        target_results = template + 'Results'
        """
            Setting up the parameters for this test
        """
        if servers:
            sf_data['spec']['workload']['args']['servers'] = servers
            target_results = target_results + '-' + str(servers)
        if threads:
            sf_data['spec']['workload']['args']['threads'] = threads
            target_results = target_results + '-' + str(threads)
        if fileio:
            sf_data['spec']['workload']['args']['fileio'] = fileio
            target_results = target_results + '-' + str(fileio)
        if samples:
            sf_data['spec']['workload']['args']['samples'] = samples
            target_results = target_results + '-' + str(samples)
        if width:
            sf_data['spec']['workload']['args']['width'] = width
            target_results = target_results + '-' + str(width)
        if depth:
            sf_data['spec']['workload']['args']['depth'] = depth
            target_results = target_results + '-' + str(depth)
        if files:
            sf_data['spec']['workload']['args']['files'] = files
            target_results = target_results + '-' + str(files)
        if file_size:
            sf_data['spec']['workload']['args']['file_size'] = file_size
            target_results = target_results + '-' + str(file_size)
        if runtime:
            sf_data['spec']['workload']['args']['runtime'] = runtime
            target_results = target_results + '-' + str(runtime)
        if pause:
            sf_data['spec']['workload']['args']['pause'] = pause
            target_results = target_results + '-' + str(pause)
        if len(blocksize) > 0:
            sf_data['spec']['workload']['args']['bs'] = blocksize
            target_results = target_results + '-' + '_'.join(blocksize)
        if with_ocs:
            if sf_data['spec']['workload']['args']['pin_server']:
                del sf_data['spec']['workload']['args']['pin_server']
        """
            Calculating the size of the volume that need to be test, it should
            be at least twice in the size then the size of the files, and at
            least 100Gi.
            since the file_size is in Kb and the vol_size need to be in Gb,
            more calculation is needed.
        """
        vol_size = int((files * (width**depth)) * file_size * 1.3)
        log.info('Number of files to create: {}'.format(
            int(files * (width**depth))))
        log.info(f'The size of all files is: {vol_size} MB')
        vol_size = int(vol_size / 1024)
        if vol_size < 100:
            vol_size = 100
        sf_data['spec']['workload']['args']['storagesize'] = f'{vol_size}Gi'

        log.info(f'Output of the configuration file is {sf_data}')

        timeout = 86400  # 3600 (1 hour) * 24 = one day

        sf_obj = OCS(**sf_data)
        sf_obj.create()
        # wait for benchmark pods to get created - takes a while
        for bench_pod in TimeoutSampler(300, 10, get_pod_name_by_pattern,
                                        'vdbench-client', 'my-ripsaw'):
            try:
                if bench_pod[0] is not None:
                    vdbench_client_pod = bench_pod[0]
                    break
            except IndexError:
                log.info('Benchmark client pod not ready yet')

        bench_pod = OCP(kind='pod', namespace='my-ripsaw')
        log.info('Waiting for VDBench benchmark to run')
        assert bench_pod.wait_for_resource(condition=constants.STATUS_RUNNING,
                                           resource_name=vdbench_client_pod,
                                           sleep=30,
                                           timeout=600)
        start_time = time.time()
        while True:
            logs = bench_pod.exec_oc_cmd(f'logs {vdbench_client_pod}',
                                         out_yaml_format=False)
            if 'Test Run Finished' in logs:
                log.info('VdBench Benchmark Completed Successfully')
                break

            if timeout < (time.time() - start_time):
                raise TimeoutError(
                    'Timed out waiting for benchmark to complete')
            time.sleep(30)

        # Get the results file from the benchmark pod and put it with the
        # test logs.
        # TODO: find the place of the actual test log and not in the parent
        #       logs path
        target_results = '{}/{}.tgz'.format(ocsci_log_path(), target_results)
        pod_results = constants.VDBENCH_RESULTS_FILE
        retrive_files_from_pod(vdbench_client_pod, target_results, pod_results)
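
Both the TimeoutSampler loop and the log-polling while loop above follow the same poll-until-ready shape; distilled into a generic helper (a sketch, not an ocs-ci API):

import time

def poll_until(predicate, timeout, interval):
    # Call predicate every `interval` seconds until it returns a truthy
    # value or `timeout` seconds elapse.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError('Timed out waiting for condition')

# e.g. result = poll_until(lambda: True, timeout=300, interval=10)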
Example #14
    def test_bulk_pod_attach_performance(self, interface_type, bulk_size):
        """
        Measures pods attachment time in bulk_size bulk

        Args:
            interface_type (str): The interface type to be tested - CephBlockPool / CephFileSystem.
            bulk_size (int): Size of the bulk to be tested
        Returns:

        """
        self.interface = interface_type

        if self.dev_mode:
            bulk_size = 3

        # Initialize some variables
        timeout = bulk_size * 5
        pvc_names_list = list()
        pod_data_list = list()

        # Getting the test start time
        test_start_time = self.get_time()
        csi_start_time = self.get_time("csi")

        log.info(f"Start creating bulk of new {bulk_size} PVCs")
        self.pvc_objs, _ = helpers.create_multiple_pvcs(
            sc_name=Interfaces_info[self.interface]["sc"],
            namespace=self.namespace,
            number_of_pvc=bulk_size,
            size=self.pvc_size,
            burst=True,
            do_reload=False,
        )
        log.info("Wait for all of the PVCs to be in Bound state")
        performance_lib.wait_for_resource_bulk_status("pvc", bulk_size,
                                                      self.namespace,
                                                      constants.STATUS_BOUND,
                                                      timeout, 10)
        # In case of creation failure, the wait_for_resource_bulk_status
        # function will raise an exception, so at this point the creation succeeded
        log.info("All PVCs were created and are in Bound state.")

        # Reload all PVC(s) information
        for pvc_obj in self.pvc_objs:
            pvc_obj.reload()
            pvc_names_list.append(pvc_obj.name)
        log.debug(f"The PVCs names are : {pvc_names_list}")

        # Create kube_job for pod creation
        pod_data_list.extend(
            scale_lib.attach_multiple_pvc_to_pod_dict(
                pvc_list=pvc_names_list,
                namespace=self.namespace,
                pvcs_per_pod=1,
            ))
        self.pods_obj = ObjectConfFile(
            name="pod_kube_obj",
            obj_dict_list=pod_data_list,
            project=self.namespace,
            tmp_path=pathlib.Path(ocsci_log_path()),
        )
        log.debug(f"PODs data list is : {json.dumps(pod_data_list, indent=3)}")

        log.info(f"{self.interface} : Before pod attach")
        bulk_start_time = time.time()
        self.pods_obj.create(namespace=self.namespace)
        # Check all the PODs reached Running state
        log.info("Checking that pods are running")
        performance_lib.wait_for_resource_bulk_status("pod", bulk_size,
                                                      self.namespace,
                                                      constants.STATUS_RUNNING,
                                                      timeout, 2)
        log.info("All POD(s) are in Running State.")
        bulk_end_time = time.time()
        bulk_total_time = bulk_end_time - bulk_start_time
        log.info(
            f"Bulk attach time of {bulk_size} pods is {bulk_total_time} seconds"
        )

        csi_bulk_total_time = performance_lib.pod_bulk_attach_csi_time(
            self.interface, self.pvc_objs, csi_start_time, self.namespace)

        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path,
                           "pod_bulk_attachtime"))

        full_results.add_key("storageclass",
                             Interfaces_info[self.interface]["name"])
        full_results.add_key("pod_bulk_attach_time", bulk_total_time)
        full_results.add_key("pod_csi_bulk_attach_time", csi_bulk_total_time)
        full_results.add_key("pvc_size", self.pvc_size)
        full_results.add_key("bulk_size", bulk_size)

        # Getting the test end time
        test_end_time = self.get_time()

        # Add the test time to the ES report
        full_results.add_key("test_time", {
            "start": test_start_time,
            "end": test_end_time
        })

        # Write the test results into the ES server
        self.results_path = helpers.get_full_test_logs_path(cname=self)
        if full_results.es_write():
            res_link = full_results.results_link()
            # write the ES link to the test results in the test log.
            log.info(f"The result can be found at : {res_link}")

            # Create text file with results of all subtests (4 - according to the parameters)
            self.write_result_to_file(res_link)