Example #1
0
def initialize_data():
    """
    Initialize the data dictionary with cluster data

    Returns:
        dict: A dictionary contains the data to push to the dashboard
    """
    # Worker instance type (e.g. 'm4.xlarge') read from the labels of one
    # worker node.
    worker_type = get_typed_nodes(
        num_of_nodes=1
    )[0].data['metadata']['labels']['beta.kubernetes.io/instance-type']

    (ocs_ver_info, _) = get_ocs_version()
    ocs_ver_full = ocs_ver_info['status']['desired']['version']
    # Extract the major.minor version ("X.Y") from a full "X.Y.Z-..." string.
    # Dots are escaped so only literal '.' separators match, and the patch
    # part accepts more than one digit. The original code called
    # m.group(1) unconditionally, raising AttributeError when the regex
    # did not match, and left ocs_ver unbound in that case (NameError
    # below) — fall back to the full version string instead.
    m = re.match(r"(\d+\.\d+)\.(\d+)-", ocs_ver_full)
    ocs_ver = m.group(1) if m else ocs_ver_full
    platform = config.ENV_DATA['platform']
    # On AWS the instance type is part of the reported environment.
    if platform.lower() == 'aws':
        platform = platform.upper() + " " + worker_type
    data_template['commitid'] = ocs_ver_full
    data_template['project'] = f"OCS{ocs_ver}"
    data_template['branch'] = ocs_ver_info['spec']['channel']
    data_template['executable'] = ocs_ver
    data_template['environment'] = platform

    return data_template
Example #2
0
def initialize_data():
    """
    Initialize the data dictionary with cluster data

    Returns:
        dict: A dictionary contains the data to push to the dashboard
    """

    # worker type is relevant only for cloud instances.
    log.info('Initializing the dashboard data')
    worker_lbl = get_typed_nodes(num_of_nodes=1)[0].data['metadata']['labels']
    if 'beta.kubernetes.io/instance-type' in worker_lbl:
        worker_type = worker_lbl['beta.kubernetes.io/instance-type']
    else:
        # TODO: Maybe for None cloud we can add the Arch ?
        #   worker_type = worker_lbl['kubernetes.io/arch']
        worker_type = ""
    log.info(f'The worker type is {worker_type}')

    (ocs_ver_info, _) = get_ocs_version()
    ocs_ver_full = ocs_ver_info['status']['desired']['version']
    # Extract the major.minor version ("X.Y") from the full version string.
    # Dots are escaped so only literal separators match. Fall back to the
    # full version so ocs_ver is always bound — the original left it
    # undefined when the regex did not match, causing a NameError when
    # building 'project'/'executable' below.
    m = re.match(r"(\d+\.\d+)\.(\d+)", ocs_ver_full)
    ocs_ver = m.group(1) if m else ocs_ver_full
    log.info(f'ocs_ver is {ocs_ver_full}')
    platform = config.ENV_DATA['platform']
    # For cloud platforms, append the instance type to the environment name.
    if platform.lower() not in ['vsphere', 'baremetal']:
        platform = f'{platform.upper()} {worker_type}'
    data_template['commitid'] = ocs_ver_full
    data_template['project'] = f"OCS{ocs_ver}"
    data_template['branch'] = ocs_ver_info['spec']['channel']
    data_template['executable'] = ocs_ver
    data_template['environment'] = platform

    return data_template
Example #3
0
def log_ocs_version(cluster):
    """
    Fixture handling version reporting for OCS.

    This fixture handles alignment of the version reporting, so that we:

     * report version for each test run (no matter if just deployment, just
       test or both deployment and tests are executed)
     * prevent conflict of version reporting with deployment/teardown (eg. we
       should not run the version logging before actual deployment, or after
       a teardown)

    Version is reported in:

     * log entries of INFO log level during test setup phase
     * ocs_version file in cluster path directory (for copy pasting into bug
       reports)
    """
    cli_params = config.RUN['cli_params']
    # A teardown-only run has no cluster left to inspect, so skip reporting.
    if cli_params.get('teardown') and not cli_params.get('deploy'):
        log.info("Skipping version reporting for teardown.")
        return
    cluster_version, image_dict = get_ocs_version()
    # Timestamp the report file so successive runs do not overwrite it.
    timestamp = datetime.now().isoformat()
    file_name = os.path.join(
        config.ENV_DATA['cluster_path'], f"ocs_version.{timestamp}"
    )
    with open(file_name, "w") as file_obj:
        report_ocs_version(cluster_version, image_dict, file_obj)
    log.info("human readable ocs version info written into %s", file_name)
Example #4
0
def initialize_data():
    """
    Initialize the data dictionary with cluster data

    Returns:
        dict: A dictionary contains the data to push to the dashboard
    """

    # worker type is relevant only for cloud instances.
    log.info("Initializing the dashboard data")
    worker_lbl = get_nodes(num_of_nodes=1)[0].data["metadata"]["labels"]
    if "beta.kubernetes.io/instance-type" in worker_lbl:
        worker_type = worker_lbl["beta.kubernetes.io/instance-type"]
    else:
        # TODO: Maybe for None cloud we can add the Arch ?
        #   worker_type = worker_lbl['kubernetes.io/arch']
        worker_type = ""
    log.info(f"The worker type is {worker_type}")

    (ocs_ver_info, _) = get_ocs_version()
    ocs_ver_full = ocs_ver_info["status"]["desired"]["version"]
    # Extract the major.minor version ("X.Y") from the full version string.
    # Dots are escaped so only literal separators match. Fall back to the
    # full version so ocs_ver is always bound — the original left it
    # undefined when the regex did not match, causing a NameError when
    # building 'project'/'executable' below.
    m = re.match(r"(\d+\.\d+)\.(\d+)", ocs_ver_full)
    ocs_ver = m.group(1) if m else ocs_ver_full
    log.info(f"ocs_ver is {ocs_ver_full}")
    # platform is upper-cased once here; the original applied .upper() a
    # second time when appending the worker type, which was redundant.
    platform = config.ENV_DATA["platform"].upper()
    if platform.lower() not in ["vsphere", "baremetal"]:
        platform = f"{platform} {worker_type}"
    data_template["commitid"] = ocs_ver_full
    data_template["project"] = f"OCS{ocs_ver}"
    data_template["branch"] = ocs_ver_info["spec"]["channel"]
    data_template["executable"] = ocs_ver
    data_template["environment"] = platform

    return data_template
    def test_smallfile_workload(self, ripsaw, es, file_size, files, threads,
                                samples, interface):
        """
        Run SmallFile Workload

        Deploys the ripsaw benchmark operator, runs the smallfile benchmark
        with the given parameters against the selected storage interface,
        waits for completion and pushes the analysed results to the internal
        elastic-search server.

        Args:
            ripsaw: ripsaw benchmark operator fixture
            es: internal elastic-search server fixture
            file_size (int): size of each file in KB
            files (int): number of files per thread
            threads (int): number of client threads
            samples (int): number of benchmark samples to run
            interface (str): storage interface (CephBlockPool / CephFS)
        """

        # Loading the main template yaml file for the benchmark
        sf_data = templating.load_yaml(constants.SMALLFILE_BENCHMARK_YAML)

        # getting the name and email  of the user that running the test.
        try:
            user = run_cmd('git config --get user.name').strip()
            email = run_cmd('git config --get user.email').strip()
        except CommandFailed:
            # if no git user define, use the default user from the CR file
            user = sf_data['spec']['test_user']
            email = ''

        # Saving the Original elastic-search IP and PORT - if defined in yaml
        es_server = ""
        es_port = ""
        if 'elasticsearch' in sf_data['spec']:
            if 'server' in sf_data['spec']['elasticsearch']:
                es_server = sf_data['spec']['elasticsearch']['server']
            if 'port' in sf_data['spec']['elasticsearch']:
                es_port = sf_data['spec']['elasticsearch']['port']
        else:
            sf_data['spec']['elasticsearch'] = {}

        # Use the internal define elastic-search server in the test
        sf_data['spec']['elasticsearch'] = {
            'server': es.get_ip(),
            'port': es.get_port()
        }

        log.info("Apply Operator CRD")
        ripsaw.apply_crd('resources/crds/ripsaw_v1alpha1_ripsaw_crd.yaml')
        if interface == constants.CEPHBLOCKPOOL:
            storageclass = constants.DEFAULT_STORAGECLASS_RBD
        else:
            storageclass = constants.DEFAULT_STORAGECLASS_CEPHFS
        log.info(f"Using {storageclass} Storageclass")
        sf_data['spec']['workload']['args']['storageclass'] = storageclass
        log.info("Running SmallFile bench")

        # Setting up the parameters for this test
        sf_data['spec']['workload']['args']['file_size'] = file_size
        sf_data['spec']['workload']['args']['files'] = files
        sf_data['spec']['workload']['args']['threads'] = threads
        sf_data['spec']['workload']['args']['samples'] = samples
        sf_data['spec']['clustername'] = get_clustername()
        sf_data['spec']['test_user'] = f'{user}<{email}>'

        # Calculating the size of the volume that need to be test, it should
        # be at least twice in the size then the size of the files, and at
        # least 100Gi.
        # Since the file_size is in Kb and the vol_size need to be in Gb, more
        # calculation is needed.
        vol_size = int(files * threads * file_size * 3)
        vol_size = int(vol_size / constants.GB2KB)
        if vol_size < 100:
            vol_size = 100
        sf_data['spec']['workload']['args']['storagesize'] = f"{vol_size}Gi"

        sf_obj = OCS(**sf_data)
        sf_obj.create()
        log.info(f'The smallfile yaml file is {sf_data}')

        # wait for benchmark pods to get created - takes a while
        for bench_pod in TimeoutSampler(240, 10, get_pod_name_by_pattern,
                                        'smallfile-client',
                                        constants.RIPSAW_NAMESPACE):
            try:
                if bench_pod[0] is not None:
                    small_file_client_pod = bench_pod[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        bench_pod = OCP(kind='pod', namespace=constants.RIPSAW_NAMESPACE)
        log.info("Waiting for SmallFile benchmark to Run")
        assert bench_pod.wait_for_resource(condition=constants.STATUS_RUNNING,
                                           resource_name=small_file_client_pod,
                                           sleep=30,
                                           timeout=600)
        start_time = time.time()

        # After testing manually, changing the timeout
        timeout = 3600

        # Getting the UUID from inside the benchmark pod.
        # Initialize uuid so a missing 'uuid=' env entry fails with a clear
        # error instead of a NameError (as the original code did).
        uuid = None
        output = bench_pod.exec_oc_cmd(f'exec {small_file_client_pod} -- env')
        for line in output.split():
            if 'uuid=' in line:
                uuid = line.split('=')[1]
        if uuid is None:
            raise Exception(
                f'Could not find the UUID in the environment of pod '
                f'{small_file_client_pod}'
            )
        log.info(f'the UUID of the test is : {uuid}')

        # Setting back the original elastic-search information
        sf_data['spec']['elasticsearch'] = {
            'server': es_server,
            'port': es_port
        }

        full_results = SmallFileResultsAnalyse(uuid, sf_data)

        # Initialaize the results doc file.
        full_results.add_key('user', sf_data['spec']['test_user'])
        full_results.add_key('ocp_version', get_ocp_version())
        full_results.add_key('ocp_build', get_build())
        full_results.add_key('ocp_channel', get_ocp_channel())

        # Getting the OCS version - fall back to the full version string so
        # ocs_ver is always bound (the original left it undefined when the
        # regex did not match). Dots are escaped to match literally.
        (ocs_ver_info, _) = get_ocs_version()
        ocs_ver_full = ocs_ver_info['status']['desired']['version']
        m = re.match(r"(\d+\.\d+)\.(\d+)", ocs_ver_full)
        ocs_ver = m.group(1) if m else ocs_ver_full

        full_results.add_key('ocs_version', ocs_ver)
        full_results.add_key('vendor', get_provider())
        full_results.add_key(
            'start_time', time.strftime('%Y-%m-%dT%H:%M:%SGMT', time.gmtime()))

        # Calculating the total size of the working data set - in GB
        full_results.add_key(
            'dataset', file_size * files * threads *
            full_results.results['clients'] / constants.GB2KB)

        full_results.add_key(
            'global_options', {
                'files': files,
                'file_size': file_size,
                'storageclass':
                sf_data['spec']['workload']['args']['storageclass'],
                'vol_size': sf_data['spec']['workload']['args']['storagesize']
            })

        # Poll the client pod logs until the benchmark reports completion,
        # then read, aggregate and publish the results.
        while True:
            logs = bench_pod.exec_oc_cmd(f'logs {small_file_client_pod}',
                                         out_yaml_format=False)
            if "RUN STATUS DONE" in logs:
                full_results.add_key(
                    'end_time',
                    time.strftime('%Y-%m-%dT%H:%M:%SGMT', time.gmtime()))
                full_results.read()
                if not full_results.dont_check:
                    full_results.add_key('hosts',
                                         full_results.get_clients_list())
                    full_results.init_full_results()
                    full_results.aggregate_host_results()
                    test_status = full_results.aggregate_samples_results()
                    full_results.write()

                    # Creating full link to the results on the ES server
                    res_link = 'http://'
                    res_link += f'{full_results.server}:{full_results.port}/'
                    res_link += f'{full_results.new_index}/_search?q='
                    res_link += f'uuid:{full_results.uuid}'
                    log.info(f'Full results can be found as : {res_link}')
                else:
                    test_status = True

                break

            if timeout < (time.time() - start_time):
                raise TimeoutError(
                    "Timed out waiting for benchmark to complete")
            time.sleep(30)
        assert (not get_logs_with_errors() and test_status), 'Test Failed'