Example #1
    def export_builds_results_to_googlesheet(
        self, sheet_name='E2E Workloads', sheet_index=3
    ):
        """
        Collect build results and export them to a Google spreadsheet

        Args:
            sheet_name (str): Name of the sheet
            sheet_index (int): Index of sheet

        """
        # Collect data and export to the Google spreadsheet
        log.info("Exporting Jenkins data to Google spreadsheet")
        g_sheet = GoogleSpreadSheetAPI(
            sheet_name=sheet_name, sheet_index=sheet_index
        )
        # Rows are inserted at a fixed index, so iterate in reverse to keep
        # the builds in their original order beneath the header row
        for build, time_build in reversed(list(self.build_completed.items())):
            g_sheet.insert_row([build[1], build[0], time_build], 2)
        # The header row is inserted last so it ends up on top
        g_sheet.insert_row(["Project", "Build", "Duration"], 2)
        # Capturing versions (OCP, OCS and Ceph) and the test run name
        g_sheet.insert_row(
            [f"ocp_version:{utils.get_cluster_version()}",
             f"ocs_build_number:{utils.get_ocs_build_number()}",
             f"ceph_version:{utils.get_ceph_version()}",
             f"test_run_name:{utils.get_testrun_name()}"], 2
        )
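A note on the helper these examples share: the implementation of GoogleSpreadSheetAPI is not shown on this page. As a rough sketch only, a wrapper with this interface could be built on the gspread library (the service-account key path below is a hypothetical placeholder):

import os

import gspread


class GoogleSpreadSheetAPI:
    """
    Minimal sketch of a wrapper around one worksheet of a Google spreadsheet.
    """
    def __init__(self, sheet_name, sheet_index,
                 secret_path="~/.secrets/gsheet_key.json"):  # hypothetical
        client = gspread.service_account(
            filename=os.path.expanduser(secret_path)
        )
        self.sheet = client.open(sheet_name).get_worksheet(sheet_index)

    def insert_row(self, values, row_index=2):
        # gspread inserts at a fixed index and pushes existing rows down,
        # which is why the callers above insert data rows first and the
        # header row last
        self.sheet.insert_row(values, row_index)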
Example #2
    def export_amq_output_to_gsheet(self, amq_output, sheet_name, sheet_index):
        """
        Collect AMQ data and export it to a Google spreadsheet

        Args:
            amq_output (dict): AMQ output as a dict
            sheet_name (str): Name of the sheet
            sheet_index (int): Index of sheet

        """
        # Collect data and export to the Google spreadsheet
        g_sheet = GoogleSpreadSheetAPI(sheet_name=sheet_name, sheet_index=sheet_index)
        log.info("Exporting AMQ data to Google spreadsheet")

        # Dict insertion order is preserved (Python 3.7+), so the header and
        # value columns stay aligned
        headers_to_key = list(amq_output.keys())
        values = list(amq_output.values())

        # Insert the values first and the headers second, so the header row lands on top
        g_sheet.insert_row(values, 2)
        g_sheet.insert_row(headers_to_key, 2)

        # Capturing versions (OCP, OCS and Ceph) and the test run name
        g_sheet.insert_row(
            [
                f"ocp_version:{utils.get_cluster_version()}",
                f"ocs_build_number:{utils.get_ocs_build_number()}",
                f"ceph_version:{utils.get_ceph_version()}",
                f"test_run_name:{utils.get_testrun_name()}",
            ],
            2,
        )
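Because each insert_row call inserts at row 2 and pushes existing rows down, the values are written first and the headers second; with a hypothetical amq_output the sheet ends up ordered like this:

# Hypothetical AMQ output; the real keys depend on the benchmark results
amq_output = {"throughput_msgs_sec": 51342, "latency_99th_ms": 12}

# After insert_row(values, 2) followed by insert_row(headers, 2):
#   row 2: throughput_msgs_sec | latency_99th_ms   (headers, inserted last)
#   row 3: 51342               | 12                (values, pushed down)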
Example #3
    def test_run_io(self, size, io_direction, jobs, runtime, depth,
                    sheet_index):
        """
        Run FIO on the pod and export the resulting IOPS to a Google
        spreadsheet
        """
        logging.info(f"Running FIO with:\nsize: {size}\njobs: {jobs}\n"
                     f"runtime: {runtime}\nIO depth: {depth}\n")
        self.pod_obj.run_io(
            'fs',
            size=size,
            io_direction=io_direction,
            jobs=jobs,
            runtime=runtime,
            depth=depth,
        )
        logging.info("Waiting for results")
        fio_result = self.pod_obj.get_fio_results()
        logging.info("IOPs after FIO:")
        reads = fio_result.get('jobs')[0].get('read').get('iops')
        writes = fio_result.get('jobs')[0].get('write').get('iops')
        logging.info(f"Read: {reads}")
        logging.info(f"Write: {writes}")
        g_sheet = GoogleSpreadSheetAPI("OCS FIO", sheet_index)
        g_sheet.insert_row([self.interface, reads, writes], 2)
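The chained .get() calls assume the shape of fio's JSON output. A trimmed, illustrative version of what get_fio_results() is expected to return (the numbers are made up):

fio_result = {
    "jobs": [
        {
            "read": {"iops": 1480.5},
            "write": {"iops": 1501.2},
        }
    ]
}
reads = fio_result.get('jobs')[0].get('read').get('iops')    # 1480.5
writes = fio_result.get('jobs')[0].get('write').get('iops')  # 1501.2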
Example #4
    def export_pgoutput_to_googlesheet(self, pg_output, sheet_name,
                                       sheet_index):
        """
        Collect pgbench output and export it to a Google spreadsheet

        Args:
            pg_output (list): pgbench outputs as a list
            sheet_name (str): Name of the sheet
            sheet_index (int): Index of sheet

        """
        # Collect data and export to the Google spreadsheet
        g_sheet = GoogleSpreadSheetAPI(sheet_name=sheet_name,
                                       sheet_index=sheet_index)
        log.info("Exporting pgbench output data to Google spreadsheet")
        for pgbench_pod in range(len(pg_output)):
            for run in range(len(pg_output[pgbench_pod][0])):
                run_id = list(pg_output[pgbench_pod][0][run].keys())[0]
                run_results = pg_output[pgbench_pod][0][run][run_id]
                lat_avg = run_results["latency_avg"]
                lat_stddev = run_results["lat_stddev"]
                tps_incl = run_results["tps_incl"]
                tps_excl = run_results["tps_excl"]
                g_sheet.insert_row(
                    [
                        f"Pgbench-pod{pg_output[pgbench_pod][1]}-run-{run_id}",
                        int(lat_avg),
                        int(lat_stddev),
                        int(tps_incl),
                        int(tps_excl),
                    ],
                    2,
                )
        g_sheet.insert_row(
            ["", "latency_avg", "lat_stddev", "tps_incl", "tps_excl"], 2)

        # Capturing versions (OCP, OCS and Ceph) and the test run name
        g_sheet.insert_row(
            [
                f"ocp_version:{utils.get_cluster_version()}",
                f"ocs_build_number:{utils.get_ocs_build_number()}",
                f"ceph_version:{utils.get_ceph_version()}",
                f"test_run_name:{utils.get_testrun_name()}",
            ],
            2,
        )
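The nested indexing above only works if pg_output has a very specific shape. A hypothetical instance consistent with those lookups: each entry is a (runs, pod_id) pair, and each run maps a run id to its metrics:

pg_output = [
    (
        [
            # one dict per run, keyed by run id
            {"1": {"latency_avg": 7.2, "lat_stddev": 1.1,
                   "tps_incl": 1388.0, "tps_excl": 1401.5}},
        ],
        "1",  # pod id, used in the "Pgbench-pod..." row label
    ),
]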
Example #5
    def export_pfoutput_to_googlesheet(self, sheet_name, sheet_index):
        """
        Collect pillowfight output and export it to a Google spreadsheet

        Args:
            sheet_name (str): Name of the sheet
            sheet_index (int): Index of sheet

        """
        # Collect data and export to the Google spreadsheet
        g_sheet = GoogleSpreadSheetAPI(sheet_name=sheet_name,
                                       sheet_index=sheet_index)
        logging.info("Exporting pillowfight data to Google spreadsheet")
        for path in listdir(self.logs):
            full_path = join(self.logs, path)
            with open(full_path, "r") as fdesc:
                data_from_log = fdesc.read()
            log_data = self.parse_pillowfight_log(data_from_log)

            # Record the lowest ops/sec sample and the highest response
            # time (divided by 1000) for this log file
            g_sheet.insert_row(
                [
                    f"{path}",
                    min(log_data["opspersec"]),
                    max(log_data["resptimes"].keys()) / 1000,
                ],
                2,
            )
        g_sheet.insert_row(["", "opspersec", "resptimes"], 2)

        # Capturing versions (OCP, OCS and Ceph) and the test run name
        g_sheet.insert_row(
            [
                f"ocp_version:{utils.get_cluster_version()}",
                f"ocs_build_number:{utils.get_ocs_build_number()}",
                f"ceph_version:{utils.get_ceph_version()}",
                f"test_run_name:{utils.get_testrun_name()}",
            ],
            2,
        )
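The min()/max() calls assume a particular return shape from parse_pillowfight_log; a hypothetical result consistent with that usage, with ops/sec samples in a list and response times as a {time: count} histogram:

log_data = {
    "opspersec": [10212, 9875, 10033],   # per-interval throughput samples
    "resptimes": {934: 120, 2810: 4},    # response time -> request count
}
worst = max(log_data["resptimes"].keys()) / 1000  # 2.81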
Example #6
def run_io_in_background(request):
    """
    Run IO during the test execution
    """
    if config.RUN['cli_params'].get('io_in_bg'):
        log.info(f"Tests will be running while IO is in the background")

        g_sheet = None
        if config.RUN['google_api_secret']:
            g_sheet = GoogleSpreadSheetAPI("IO BG results", 0)
        else:
            log.warning(
                "Google API secret was not found. IO won't be reported to "
                "a Google spreadsheet")
        results = []
        temp_file = tempfile.NamedTemporaryFile(mode='w+',
                                                prefix='test_status',
                                                delete=False)

        def get_test_status():
            with open(temp_file.name, 'r') as t_file:
                return t_file.readline()

        def set_test_status(status):
            with open(temp_file.name, 'w') as t_file:
                t_file.writelines(status)

        set_test_status('running')

        def finalizer():
            """
            Delete the resources created during setup, used for
            running IO in the test background
            """
            set_test_status('finished')
            try:
                for status in TimeoutSampler(90, 3, get_test_status):
                    if status == 'terminated':
                        break
            except TimeoutExpiredError:
                log.warning("Background IO was still in progress before IO "
                            "thread termination")
            if thread:
                thread.join()

            log.info(f"Background IO has stopped")
            for result in results:
                log.info(f"IOPs after FIO for pod {pod_obj.name}:")
                log.info(f"Read: {result[0]}")
                log.info(f"Write: {result[1]}")

            if pod_obj:
                pod_obj.delete()
                pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
            if pvc_obj:
                pvc_obj.delete()
                pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
            if sc_obj:
                sc_obj.delete()
            if cbp_obj:
                cbp_obj.delete()
            if secret_obj:
                secret_obj.delete()

        request.addfinalizer(finalizer)

        secret_obj = helpers.create_secret(
            interface_type=constants.CEPHBLOCKPOOL)
        cbp_obj = helpers.create_ceph_block_pool()
        sc_obj = helpers.create_storage_class(
            interface_type=constants.CEPHBLOCKPOOL,
            interface_name=cbp_obj.name,
            secret_name=secret_obj.name)
        pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, size='2Gi')
        helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
        pvc_obj.reload()
        pod_obj = helpers.create_pod(interface_type=constants.CEPHBLOCKPOOL,
                                     pvc_name=pvc_obj.name)
        helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
        pod_obj.reload()

        def run_io_in_bg():
            """
            Run IO by executing FIO and deleting the file created for FIO on
            the pod, in a while true loop. Will be running as long as
            the test is running.
            """
            while get_test_status() == 'running':
                pod_obj.run_io('fs', '1G')
                result = pod_obj.get_fio_results()
                reads = result.get('jobs')[0].get('read').get('iops')
                writes = result.get('jobs')[0].get('write').get('iops')
                if g_sheet:
                    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    g_sheet.insert_row([now, reads, writes])

                results.append((reads, writes))

                file_path = os.path.join(
                    pod_obj.get_storage_path(storage_type='fs'),
                    pod_obj.io_params['filename'])
                pod_obj.exec_cmd_on_pod(f'rm -rf {file_path}')
            set_test_status('terminated')

        log.info(f"Start running IO in the test background")

        thread = threading.Thread(target=run_io_in_bg)
        thread.start()
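The fixture coordinates the foreground test and the background IO thread through a small status file rather than a threading primitive. A stripped-down, self-contained sketch of that handshake (the sleep stands in for one FIO iteration):

import os
import tempfile
import threading
import time

temp_file = tempfile.NamedTemporaryFile(mode='w+', prefix='test_status',
                                        delete=False)

def set_test_status(status):
    with open(temp_file.name, 'w') as t_file:
        t_file.writelines(status)

def get_test_status():
    with open(temp_file.name, 'r') as t_file:
        return t_file.readline()

def run_io_in_bg():
    # Keep "running IO" until the finalizer flips the status, then ack
    while get_test_status() == 'running':
        time.sleep(0.1)
    set_test_status('terminated')

set_test_status('running')
thread = threading.Thread(target=run_io_in_bg)
thread.start()
set_test_status('finished')  # what the finalizer does on teardown
thread.join()
assert get_test_status() == 'terminated'
os.unlink(temp_file.name)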
Example #7

    def test_run_pgsql(self, transactions, pod_name):
        """
        Run a pgbench workload, respin a Ceph pod while it runs, and export
        the results to a Google spreadsheet
        """
        # Create pgbench benchmark
        log.info("Create resource file for pgbench workload")
        pg_trans = transactions
        timeout = pg_trans * 3
        pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
        pg_data['spec']['workload']['args']['transactions'] = pg_trans
        pg_obj = OCS(**pg_data)
        pg_obj.create()

        # Wait for pgbench pod to be created
        for pgbench_pod in TimeoutSampler(
            pg_trans, 3, get_pod_name_by_pattern,
            'pgbench', 'my-ripsaw'
        ):
            try:
                if pgbench_pod[0] is not None:
                    pgbench_client_pod = pgbench_pod[0]
                    break
            except IndexError:
                log.info("Bench pod not ready yet")

        # Respin the Ceph pod
        resource_osd = [pod_name]
        log.info(f"Respin Ceph pod {pod_name}")
        disruption = disruption_helpers.Disruptions()
        for resource in resource_osd:
            disruption.set_resource(resource=resource)
            disruption.delete_resource()

        # Wait for the pgbench pod to initialize and complete
        log.info("Waiting for pgbench_client to complete")
        pod_obj = OCP(kind='pod')
        pod_obj.wait_for_resource(
            condition='Completed',
            resource_name=pgbench_client_pod,
            timeout=timeout,
            sleep=10,
        )

        # Collect the pgbench logs and parse the results
        output = run_cmd(f'oc logs {pgbench_client_pod}')
        pg_output = utils.parse_pgsql_logs(output)
        log.info(
            "*******PGBench output log*********\n"
            f"{pg_output}"
        )
        for data in pg_output:
            latency_avg = data['latency_avg']
            if not latency_avg:
                raise UnexpectedBehaviour(
                    "PGBench failed to run, no data found on latency_avg"
                )
        log.info("PGBench has completed successfully")

        # Collect data and export to Google doc spreadsheet
        g_sheet = GoogleSpreadSheetAPI(sheet_name="OCS PGSQL", sheet_index=2)
        for lat in pg_output:
            lat_avg = lat['latency_avg']
            lat_stddev = lat['lat_stddev']
            tps_incl = lat['tps_incl']
            tps_excl = lat['tps_excl']
            g_sheet.insert_row(
                [int(lat_avg),
                 int(lat_stddev),
                 int(tps_incl),
                 int(tps_excl)], 2
            )
        # Clean up pgbench benchmark
        log.info("Deleting PG bench benchmark")
        pg_obj.delete()
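This pgbench test (and the next one) polls for the client pod with TimeoutSampler, which, as used here, yields func(*args) every sleep seconds and raises TimeoutExpiredError once timeout seconds elapse. A generic version of that polling pattern (the import paths are an assumption):

from ocs_ci.ocs.exceptions import TimeoutExpiredError  # assumed path
from ocs_ci.utility.utils import TimeoutSampler        # assumed path


def wait_for(predicate, timeout=60, sleep=3):
    """
    Poll predicate every `sleep` seconds until it returns a truthy value
    or `timeout` seconds elapse; return None on timeout.
    """
    try:
        for result in TimeoutSampler(timeout, sleep, predicate):
            if result:
                return result
    except TimeoutExpiredError:
        return None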
Example #8

    def test_run_pgsql_node_drain(self, transactions=900, node_type='master'):
        """
        Run a pgbench workload, drain a node while the pod is running, and
        export the results to a Google spreadsheet
        """
        # Create pgbench benchmark
        log.info("Create resource file for pgbench workload")
        pg_trans = transactions
        timeout = pg_trans * 3
        pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
        pg_data['spec']['workload']['args']['transactions'] = pg_trans
        pg_obj = OCS(**pg_data)
        pg_obj.create()

        # Getting pgbench podname
        for pgbench_pod in TimeoutSampler(
            pg_trans, 3, get_pod_name_by_pattern,
            'pgbench', 'my-ripsaw'
        ):
            try:
                if pgbench_pod[0] is not None:
                    pgbench_client_pod = pgbench_pod[0]
                    break
            except IndexError:
                log.info("Bench pod is not found")

        # Wait for the pgbench pod to be in Running state
        log.info("Waiting for pgbench_pod to be in running state")
        pod_obj = OCP(kind='pod')
        pod_obj.wait_for_resource(
            condition='Running',
            resource_name=pgbench_client_pod,
            timeout=timeout,
            sleep=5,
        )

        # Node drain with specific node type
        typed_nodes = node.get_typed_nodes(node_type=node_type, num_of_nodes=1)
        typed_node_name = typed_nodes[0].name

        # Node maintenance - to gracefully terminate all pods on the node
        node.drain_nodes([typed_node_name])

        # Make the node schedulable again
        node.schedule_nodes([typed_node_name])

        # Perform cluster and Ceph health checks
        self.sanity_helpers.health_check()

        # Wait for pg_bench pod to complete workload
        log.info("Waiting for pgbench_client to complete")
        pod_obj.wait_for_resource(
            condition='Completed',
            resource_name=pgbench_client_pod,
            timeout=timeout,
            sleep=10,
        )

        # Parsing the results
        output = run_cmd(f'oc logs {pgbench_client_pod}')
        pg_output = utils.parse_pgsql_logs(output)
        log.info(
            "*******PGBench output log*********\n"
            f"{pg_output}"
        )
        for data in pg_output:
            latency_avg = data['latency_avg']
            if not latency_avg:
                raise UnexpectedBehaviour(
                    "PGBench failed to run, no data found on latency_avg"
                )
        log.info("PGBench has completed successfully")

        # Collect data and export to Google doc spreadsheet
        g_sheet = GoogleSpreadSheetAPI(sheet_name="OCS PGSQL", sheet_index=3)
        for lat in pg_output:
            lat_avg = lat['latency_avg']
            lat_stddev = lat['lat_stddev']
            tps_incl = lat['tps_incl']
            tps_excl = lat['tps_excl']
            g_sheet.insert_row(
                [int(lat_avg),
                 int(lat_stddev),
                 int(tps_incl),
                 int(tps_excl)], 2
            )
        # Clean up pgbench benchmark
        log.info("Deleting PG bench benchmark")
        pg_obj.delete()
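node.drain_nodes and node.schedule_nodes presumably wrap the standard OpenShift node-maintenance commands. For illustration only, the equivalent steps expressed with the run_cmd helper already used above (the exact flags the helpers pass are an assumption):

# Assumed equivalents of the node maintenance helpers above
run_cmd(f"oc adm drain {typed_node_name} --ignore-daemonsets --force")
run_cmd(f"oc adm uncordon {typed_node_name}")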