def get_pgbech_pod_status_table(self, pgbench_pods):
    """
    Log a summary table of pgbench results, one row per parsed run.

    Args:
        pgbench_pods (list): List of pgbench pod objects

    """
    table = PrettyTable()
    table.field_names = [
        'pod_name', 'scaling_factor', 'num_clients', 'num_threads',
        'trans_client', 'actually_trans', 'latency_avg', 'lat_stddev',
        'tps_incl', 'tps_excl'
    ]
    # Keys in the parsed pgbench output, in table-column order
    metric_keys = (
        'scaling_factor', 'num_clients', 'num_threads',
        'number_of_transactions_per_client',
        'number_of_transactions_actually_processed',
        'latency_avg', 'lat_stddev', 'tps_incl', 'tps_excl'
    )
    for bench_pod in pgbench_pods:
        logs = run_cmd(f'oc logs {bench_pod.name}')
        for parsed in utils.parse_pgsql_logs(logs):
            for run_data in parsed.values():
                table.add_row(
                    [bench_pod.name] + [run_data[k] for k in metric_keys]
                )
    log.info(f'\n{table}\n')
def validate_pgbench_run(self, pgbench_pods, print_table=True):
    """
    Validate that every pgbench pod produced latency data and, optionally,
    log a summary table of all runs.

    Args:
        pgbench_pods (list): List of pgbench pod objects
        print_table (bool): When True, log a PrettyTable of all results

    Returns:
        list: (pg_output, pod_name) tuples, one per pgbench pod

    Raises:
        UnexpectedBehaviour: If a run contains no latency_avg data

    """
    collected = []
    for bench_pod in pgbench_pods:
        log.info(f"pgbench_client_pod===={bench_pod.name}====")
        logs = run_cmd(
            f"oc logs {bench_pod.name} -n {RIPSAW_NAMESPACE}")
        pg_output = utils.parse_pgsql_logs(logs)
        log.info("*******PGBench output log*********\n"
                 f"{pg_output}")
        # A missing latency_avg means the benchmark never produced results
        for run in pg_output:
            first_run_id = list(run.keys())[0]
            if not run[first_run_id]["latency_avg"]:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info(f"PGBench on {bench_pod.name} completed successfully")
        collected.append((pg_output, bench_pod.name))

    if print_table:
        table = PrettyTable()
        table.field_names = [
            "pod_name", "scaling_factor", "num_clients", "num_threads",
            "trans_client", "actually_trans", "latency_avg", "lat_stddev",
            "tps_incl", "tps_excl",
        ]
        # Keys in the parsed pgbench output, in table-column order
        metric_keys = (
            "scaling_factor", "num_clients", "num_threads",
            "number_of_transactions_per_client",
            "number_of_transactions_actually_processed",
            "latency_avg", "lat_stddev", "tps_incl", "tps_excl",
        )
        for pg_output, pod_name in collected:
            for parsed in pg_output:
                for run_data in parsed.values():
                    table.add_row(
                        [pod_name] + [run_data[k] for k in metric_keys]
                    )
        log.info(f"\n{table}\n")
    return collected
def validate_pgbench_run(self, pgbench_pods):
    """
    Validate that each pgbench pod run produced latency data.

    Args:
        pgbench_pods (list): List of pgbench pod objects

    Returns:
        list: Parsed pgbench output for every pod

    Raises:
        UnexpectedBehaviour: If a run contains no latency_avg data

    """
    outputs = []
    for bench_pod in pgbench_pods:
        log.info(f"pgbench_client_pod===={bench_pod.name}====")
        parsed = utils.parse_pgsql_logs(
            run_cmd(f'oc logs {bench_pod.name}'))
        log.info("*******PGBench output log*********\n"
                 f"{parsed}")
        # A missing latency_avg means the benchmark never produced results
        for run in parsed:
            run_ids = list(run.keys())
            if not run[run_ids[0]]['latency_avg']:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info(f"PGBench on {bench_pod.name} completed successfully")
        outputs.append(parsed)
    return outputs
def test_sql_workload_simple(self, ripsaw):
    """
    Basic pgsql workload: deploy postgres through ripsaw, run a pgbench
    benchmark, validate its output and clean up.
    """
    # Deploy the postgres database via the ripsaw operator
    log.info("Deploying postgres database")
    ripsaw.apply_crd('resources/crds/'
                     'ripsaw_v1alpha1_ripsaw_crd.yaml')
    ripsaw.setup_postgresql()

    # Create the pgbench benchmark resource
    log.info("Create resource file for pgbench workload")
    benchmark = OCS(**templating.load_yaml(constants.PGSQL_BENCHMARK_YAML))
    benchmark.create()

    # Poll until the pgbench client pod name can be resolved
    for candidates in TimeoutSampler(300, 3, get_pod_name_by_pattern,
                                     'pgbench-1-dbs-client', 'my-ripsaw'):
        try:
            if candidates[0] is not None:
                pgbench_client_pod = candidates[0]
                break
        except IndexError:
            log.info("Bench pod not ready yet")

    # Wait for the pgbench client pod to finish its run
    log.info("Waiting for pgbench_client to complete")
    OCP(kind='pod').wait_for_resource(
        condition='Completed',
        resource_name=pgbench_client_pod,
        timeout=800,
        sleep=10,
    )

    # Parse the pgbench logs; a missing latency_avg means a failed run
    pg_output = utils.parse_pgsql_logs(
        run_cmd(f'oc logs {pgbench_client_pod}'))
    log.info("*******PGBench output log*********\n"
             f"{pg_output}")
    for data in pg_output:
        if not data['latency_avg']:
            raise UnexpectedBehaviour("PGBench failed to run, "
                                      "no data found on latency_avg")
    log.info("PGBench has completed successfully")

    # Clean up the benchmark resource
    log.info("Deleting PG bench benchmark")
    benchmark.delete()
def test_sql_workload_simple(self, ripsaw):
    """
    Basic pgsql workload driven with `oc wait`: deploy postgres, run a
    pgbench benchmark job, validate its log output and clean up.
    """
    # Deploy the postgres database and wait for its pod to become ready
    log.info("Deploying postgres database")
    ripsaw.apply_crd('resources/crds/'
                     'ripsaw_v1alpha1_ripsaw_crd.yaml')
    ripsaw.setup_postgresql()
    run_cmd('bin/oc wait --for condition=ready pod '
            '-l app=postgres '
            '--timeout=120s')

    # Create the pgbench benchmark resource
    log.info("Create resource file for pgbench workload")
    benchmark = OCS(
        **templating.load_yaml_to_dict(constants.PGSQL_BENCHMARK_YAML))
    benchmark.create()

    # Give the benchmark a fixed grace period to create its client pod
    log.info("waiting for pgbench benchmark to create, "
             f"PGbench pod name: {benchmark.name} ")
    wait_time = 30
    log.info(f"Waiting {wait_time} seconds...")
    time.sleep(wait_time)

    # Resolve the client pod name, then wait for init and job completion
    pgbench_pod = run_cmd('bin/oc get pods -l '
                          'app=pgbench-client -o name').split('/')[1]
    run_cmd('bin/oc wait --for condition=Initialized '
            f'pods/{pgbench_pod} '
            '--timeout=60s')
    run_cmd('bin/oc wait --for condition=Complete jobs '
            '-l app=pgbench-client '
            '--timeout=300s')

    # Parse the pgbench logs; a missing latency_avg means a failed run
    pg_output = utils.parse_pgsql_logs(run_cmd(f'bin/oc logs {pgbench_pod}'))
    log.info("*******PGBench output log*********\n"
             f"{pg_output}")
    for data in pg_output:
        if not data['latency_avg']:
            raise UnexpectedBehaviour("PGBench failed to run, "
                                      "no data found on latency_avg")
    log.info("PGBench has completed successfully")

    # Clean up the benchmark resource
    log.info("Deleting PG bench benchmark:")
    benchmark.delete()
def validate_pgbench_run(self, pgbench_pods, print_table=True):
    """
    Validate that every pgbench pod produced latency data and, optionally,
    log a summary table of all runs.

    Args:
        pgbench_pods (list): List of pgbench pod objects
        print_table (bool): When True, log a PrettyTable of all results

    Returns:
        list: (pg_output, pod_name) tuples, one per pgbench pod

    Raises:
        UnexpectedBehaviour: If a run contains no latency_avg data

    """
    collected = []
    for bench_pod in pgbench_pods:
        log.info(f"pgbench_client_pod===={bench_pod.name}====")
        pg_output = utils.parse_pgsql_logs(
            run_cmd(f'oc logs {bench_pod.name}'))
        log.info("*******PGBench output log*********\n"
                 f"{pg_output}")
        # A missing latency_avg means the benchmark never produced results
        for run in pg_output:
            run_ids = list(run.keys())
            if not run[run_ids[0]]['latency_avg']:
                raise UnexpectedBehaviour("PGBench failed to run, "
                                          "no data found on latency_avg")
        log.info(f"PGBench on {bench_pod.name} completed successfully")
        collected.append((pg_output, bench_pod.name))

    if print_table:
        table = PrettyTable()
        table.field_names = [
            'pod_name', 'scaling_factor', 'num_clients', 'num_threads',
            'trans_client', 'actually_trans', 'latency_avg', 'lat_stddev',
            'tps_incl', 'tps_excl'
        ]
        # Keys in the parsed pgbench output, in table-column order
        metric_keys = (
            'scaling_factor', 'num_clients', 'num_threads',
            'number_of_transactions_per_client',
            'number_of_transactions_actually_processed',
            'latency_avg', 'lat_stddev', 'tps_incl', 'tps_excl'
        )
        for pg_output, pod_name in collected:
            for parsed in pg_output:
                for run_data in parsed.values():
                    table.add_row(
                        [pod_name] + [run_data[k] for k in metric_keys]
                    )
        log.info(f'\n{table}\n')
    return collected
def get_pgbech_pod_status_table(self, pgbench_pods):
    """
    Log a summary table of pgbench results, one row per parsed run.

    Args:
        pgbench_pods (list): List of pgbench pod objects

    """
    table = PrettyTable()
    table.field_names = [
        "pod_name", "scaling_factor", "num_clients", "num_threads",
        "trans_client", "actually_trans", "latency_avg", "lat_stddev",
        "tps_incl", "tps_excl",
    ]
    # Keys in the parsed pgbench output, in table-column order
    metric_keys = (
        "scaling_factor", "num_clients", "num_threads",
        "number_of_transactions_per_client",
        "number_of_transactions_actually_processed",
        "latency_avg", "lat_stddev", "tps_incl", "tps_excl",
    )
    for bench_pod in pgbench_pods:
        logs = run_cmd(f"oc logs {bench_pod.name} -n {BMO_NAME}")
        for parsed in utils.parse_pgsql_logs(logs):
            for run_data in parsed.values():
                table.add_row(
                    [bench_pod.name] + [run_data[k] for k in metric_keys]
                )
    log.info(f"\n{table}\n")
def test_run_pgsql(self, transactions, pod_name):
    """
    pgsql workload combined with a Ceph pod respin.

    Starts a pgbench benchmark, respins the given Ceph pod while it runs,
    then validates the pgbench output and pushes the numbers to a Google
    spreadsheet before cleaning up.

    Args:
        transactions (int): Number of pgbench transactions per client
        pod_name (str): Ceph resource to respin during the run

    Raises:
        UnexpectedBehaviour: If pgbench produced no latency_avg data
    """
    # Create the pgbench benchmark with the requested transaction count
    log.info("Create resource file for pgbench workload")
    pg_trans = transactions
    timeout = pg_trans * 3
    pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
    pg_data['spec']['workload']['args']['transactions'] = pg_trans
    benchmark = OCS(**pg_data)
    benchmark.create()

    # Poll until the pgbench client pod name can be resolved
    for candidates in TimeoutSampler(
        pg_trans, 3, get_pod_name_by_pattern, 'pgbench', 'my-ripsaw'
    ):
        try:
            if candidates[0] is not None:
                pgbench_client_pod = candidates[0]
                break
        except IndexError:
            log.info("Bench pod not ready yet")

    # Respin the requested Ceph pod while the benchmark runs
    log.info(f"Respin Ceph pod {pod_name}")
    disruption = disruption_helpers.Disruptions()
    for resource in [f'{pod_name}']:
        disruption.set_resource(resource=resource)
        disruption.delete_resource()

    # Wait for the pgbench client pod to finish its run
    log.info("Waiting for pgbench_client to complete")
    OCP(kind='pod').wait_for_resource(
        condition='Completed',
        resource_name=pgbench_client_pod,
        timeout=timeout,
        sleep=10,
    )

    # Parse the pgbench logs; a missing latency_avg means a failed run
    pg_output = utils.parse_pgsql_logs(
        run_cmd(f'oc logs {pgbench_client_pod}'))
    log.info(
        "*******PGBench output log*********\n"
        f"{pg_output}"
    )
    for data in pg_output:
        if not data['latency_avg']:
            raise UnexpectedBehaviour(
                "PGBench failed to run, no data found on latency_avg"
            )
    log.info("PGBench has completed successfully")

    # Export the latency/tps numbers to the tracking spreadsheet
    g_sheet = GoogleSpreadSheetAPI(sheet_name="OCS PGSQL", sheet_index=2)
    for lat in pg_output:
        g_sheet.insert_row(
            [int(lat['latency_avg']), int(lat['lat_stddev']),
             int(lat['tps_incl']), int(lat['tps_excl'])],
            2
        )

    # Clean up the benchmark resource
    log.info("Deleting PG bench benchmark")
    benchmark.delete()
def test_run_pgsql_node_drain(self, transactions=900, node_type='master'):
    """
    pgsql workload combined with a node drain.

    Starts a pgbench benchmark, drains (and then re-schedules) one node of
    the given type while it runs, checks cluster health, then validates the
    pgbench output and pushes the numbers to a Google spreadsheet.

    Args:
        transactions (int): Number of pgbench transactions per client
        node_type (str): Type of node to drain ('master' by default)

    Raises:
        UnexpectedBehaviour: If pgbench produced no latency_avg data
    """
    # Create the pgbench benchmark with the requested transaction count
    log.info("Create resource file for pgbench workload")
    pg_trans = transactions
    timeout = pg_trans * 3
    pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
    pg_data['spec']['workload']['args']['transactions'] = pg_trans
    benchmark = OCS(**pg_data)
    benchmark.create()

    # Poll until the pgbench client pod name can be resolved
    for candidates in TimeoutSampler(
        pg_trans, 3, get_pod_name_by_pattern, 'pgbench', 'my-ripsaw'
    ):
        try:
            if candidates[0] is not None:
                pgbench_client_pod = candidates[0]
                break
        except IndexError:
            log.info("Bench pod is not found")

    # The drain must happen while the benchmark is actually running
    log.info("Waiting for pgbench_pod to be in running state")
    pod_obj = OCP(kind='pod')
    pod_obj.wait_for_resource(
        condition='Running',
        resource_name=pgbench_client_pod,
        timeout=timeout,
        sleep=5,
    )

    # Drain one node of the requested type, then make it schedulable again
    typed_node_name = node.get_typed_nodes(
        node_type=node_type, num_of_nodes=1)[0].name
    node.drain_nodes([typed_node_name])
    node.schedule_nodes([typed_node_name])

    # Perform cluster and Ceph health checks
    self.sanity_helpers.health_check()

    # Wait for the pgbench client pod to finish its run
    log.info("Waiting for pgbench_client to complete")
    pod_obj.wait_for_resource(
        condition='Completed',
        resource_name=pgbench_client_pod,
        timeout=timeout,
        sleep=10,
    )

    # Parse the pgbench logs; a missing latency_avg means a failed run
    pg_output = utils.parse_pgsql_logs(
        run_cmd(f'oc logs {pgbench_client_pod}'))
    log.info(
        "*******PGBench output log*********\n"
        f"{pg_output}"
    )
    for data in pg_output:
        if not data['latency_avg']:
            raise UnexpectedBehaviour(
                "PGBench failed to run, no data found on latency_avg"
            )
    log.info("PGBench has completed successfully")

    # Export the latency/tps numbers to the tracking spreadsheet
    g_sheet = GoogleSpreadSheetAPI(sheet_name="OCS PGSQL",
                                   sheet_index=3)
    for lat in pg_output:
        g_sheet.insert_row(
            [int(lat['latency_avg']), int(lat['lat_stddev']),
             int(lat['tps_incl']), int(lat['tps_excl'])],
            2
        )

    # Clean up the benchmark resource
    log.info("Deleting PG bench benchmark")
    benchmark.delete()