def Run(benchmark_spec):
    """Runs the pgbench benchark on the client vm, against the db server.

  Args:
    benchmark_spec: benchmark_spec object which contains the database server
      and client_vm

  Returns:
    An empty list; samples are published after each client count completes.
  """
    UpdateBenchmarkSpecWithRunStageFlags(benchmark_spec)

    db = benchmark_spec.relational_db
    connection_string = db.MakePsqlConnectionString(TEST_DB_NAME)

    common_metadata = {
        'scale_factor': benchmark_spec.scale_factor,
        'postgres_db_size_MB': db.postgres_db_size_MB,
        'seconds_per_test': benchmark_spec.seconds_per_test,
        'seconds_to_pause_before_steps': benchmark_spec.seconds_to_pause,
    }
    for client in benchmark_spec.client_counts:
        time.sleep(benchmark_spec.seconds_to_pause)
        jobs = min(client, 16)
        command = (
            'pgbench {0} --client={1} --jobs={2} --time={3} --progress=1 '
            '--report-latencies'.format(connection_string, client, jobs,
                                        benchmark_spec.seconds_per_test))
        _, stderr = benchmark_spec.vms[0].RobustRemoteCommand(command,
                                                              should_log=True)
        samples = MakeSamplesFromOutput(stderr, client, jobs, common_metadata)
        publisher.PublishRunStageSamples(benchmark_spec, samples)
    return []
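For reference, here is a minimal, self-contained sketch of what the MakeSamplesFromOutput helper used above might look like. It assumes pgbench's --progress output arrives on stderr as lines of the form "progress: 1.0 s, 1234.5 tps, lat 2.345 ms stddev 0.123" and stands in a namedtuple for the framework's sample object; the real helper and its metric names may differ.

import collections

# Stand-in for the framework's sample object (an assumption for this sketch).
Sample = collections.namedtuple('Sample', ['metric', 'value', 'unit', 'metadata'])


def MakeSamplesFromOutput(pgbench_stderr, num_clients, num_jobs, metadata):
    """Parses pgbench --progress output into a per-second TPS sample (sketch)."""
    metadata = dict(metadata, clients=num_clients, jobs=num_jobs)
    tps_values = []
    for line in pgbench_stderr.splitlines():
        # Expected form: "progress: 1.0 s, 1234.5 tps, lat 2.345 ms stddev 0.123"
        if not line.startswith('progress:'):
            continue
        tps_field = line.split(',')[1].strip()  # e.g. "1234.5 tps"
        tps_values.append(float(tps_field.split(' ')[0]))
    return [Sample(metric='tps_array', value=-1, unit='tps',
                   metadata=dict(metadata, tps=tps_values))]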
Example No. 2
def Run(benchmark_spec):
    """Run the MySQL Service benchmark and publish results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    An empty list; results are published as they are produced.
  """
    logging.info('Start benchmarking MySQL Service, '
                 'Cloud Provider is %s.', FLAGS.cloud)
    client_vm = benchmark_spec.vms[0]
    db = benchmark_spec.relational_db

    for thread_count in FLAGS.sysbench_thread_counts:
        metadata = CreateMetadataFromFlags(db)
        metadata['sysbench_thread_count'] = thread_count
        # The run phase is common across providers. The client VM (vms[0])
        # holds all the information and state needed to carry out the run.
        run_results = _RunSysbench(client_vm, metadata, benchmark_spec,
                                   thread_count)
        print(run_results)
        publisher.PublishRunStageSamples(benchmark_spec, run_results)

    if (FLAGS.use_managed_db
            and benchmark_spec.relational_db.spec.high_availability
            and FLAGS.sysbench_pre_failover_seconds):
        last_client_count = FLAGS.sysbench_thread_counts[-1]
        failover_results = _PerformFailoverTest(client_vm, metadata,
                                                benchmark_spec,
                                                last_client_count)
        print(failover_results)
        publisher.PublishRunStageSamples(benchmark_spec, failover_results)

    # All results have already been published. Database results take a long
    # time to gather; if later client counts or the failover test fail, we
    # still want the data from the earlier tests, so results are published
    # as they are found.
    return []
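As a rough illustration of the per-thread-count invocation that _RunSysbench issues, the helper below builds a standard sysbench OLTP run command; the workload name, connection flags, and parameter values are placeholders rather than the benchmark's actual settings.

def _BuildSysbenchRunCommand(thread_count, run_seconds, db_host, db_user,
                             db_password):
    """Builds an illustrative sysbench OLTP run command for one thread count."""
    return ('sysbench oltp_read_write '  # workload name is a placeholder
            '--db-driver=mysql '
            f'--mysql-host={db_host} '
            f'--mysql-user={db_user} '
            f'--mysql-password={db_password} '
            f'--threads={thread_count} '
            f'--time={run_seconds} '
            '--report-interval=1 '  # emit per-second statistics to parse
            'run')

The client VM would then execute this with something like client_vm.RobustRemoteCommand(command), mirroring the pgbench examples in this listing.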
Example No. 3
def _RunTest(benchmark_spec, sender, receiver, dest_ip, ip_type, conf,
             cooldown_s):
    """Run a single NTTTCP test, and publish the results."""
    try:
        results = ntttcp.RunNtttcp(sender, receiver, dest_ip, ip_type,
                                   conf.udp, conf.threads, conf.time_s,
                                   conf.packet_size, cooldown_s)
        publisher.PublishRunStageSamples(benchmark_spec, results)
        return True
    except IOError:
        logging.info('Failed to publish %s IP results for config %s', ip_type,
                     str(conf))
        return False
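Below is a hedged usage sketch of how a driver might loop over configurations with _RunTest; the config list, VM attribute names, and cooldown value are illustrative assumptions, not the benchmark's actual wiring.

import logging


def Run(benchmark_spec):
    """Illustrative driver: runs every NTTTCP config, tolerating publish failures."""
    sender, receiver = benchmark_spec.vms[0], benchmark_spec.vms[1]
    failed_confs = []
    for conf in benchmark_spec.config_list:  # assumed attribute
        for ip_type, dest_ip in (('internal', receiver.internal_ip),
                                 ('external', receiver.ip_address)):
            if not _RunTest(benchmark_spec, sender, receiver, dest_ip, ip_type,
                            conf, cooldown_s=60):  # cooldown is illustrative
                failed_confs.append((ip_type, conf))
    if failed_confs:
        logging.info('Failed to publish results for configs: %s', failed_confs)
    return []  # samples were already published per config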
Example No. 4
def RunPgBench(benchmark_spec,
               relational_db,
               vm,
               test_db_name,
               client_counts,
               job_counts,
               seconds_to_pause,
               seconds_per_test,
               metadata,
               file=None,
               path=None):
    """Run Pgbench on the client VM.

  Args:
    benchmark_spec: Benchmark spec of the run
    relational_db: Relational database object
    vm: Client VM
    test_db_name: The name of the database
    client_counts: List of client counts to test
    job_counts: List of job counts to test
    seconds_to_pause: Seconds to pause between tests
    seconds_per_test: Seconds per test
    metadata: Metadata of the benchmark
    file: Optional filename of the benchmark script
    path: Optional file path of the benchmark script.
  """
    connection_string = relational_db.client_vm_query_tools.GetConnectionString(
        database_name=test_db_name)

    if file and path:
        metadata['pgbench_file'] = file

    samples = []
    if job_counts and len(client_counts) != len(job_counts):
        raise ValueError('Length of clients and jobs must be the same.')
    for i in range(len(client_counts)):
        time.sleep(seconds_to_pause)
        client = client_counts[i]
        if job_counts:
            jobs = job_counts[i]
        else:
            jobs = min(client, 16)
        command = (f'pgbench {connection_string} --client={client} '
                   f'--jobs={jobs} --time={seconds_per_test} --progress=1 '
                   '--report-latencies')
        if file and path:
            command = f'cd {path} && {command} --file={file}'
        _, stderr = vm.RobustRemoteCommand(command)
        samples = MakeSamplesFromOutput(stderr, client, jobs, metadata)
        publisher.PublishRunStageSamples(benchmark_spec, samples)
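Finally, a hedged sketch of how a benchmark's Run stage might wire up RunPgBench above; the database name, client and job counts, and metadata values are illustrative placeholders rather than real flag values.

def Run(benchmark_spec):
    """Illustrative Run stage for the pgbench helper above."""
    RunPgBench(
        benchmark_spec,
        relational_db=benchmark_spec.relational_db,
        vm=benchmark_spec.vms[0],
        test_db_name='pgbench',    # placeholder database name
        client_counts=[1, 8, 32],  # illustrative values
        job_counts=None,           # falls back to min(client, 16) per run
        seconds_to_pause=30,
        seconds_per_test=600,
        metadata={'pgbench_scale_factor': 100})  # illustrative metadata entry
    return []  # samples were published per client count inside RunPgBench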