Example #1
def _RunDistributedTf(benchmark_spec):
    """Run distributed TensorFlow for each model specified.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

    Returns:
      A list of sample.Sample objects.
    """

    ps_hosts = benchmark_spec.vm_groups['parameter_server_hosts']
    worker_hosts = benchmark_spec.vm_groups['worker_hosts']
    dist_args = '--ps_hosts={ps_args} --worker_hosts={worker_args}'.format(
        ps_args=_GetHostsArgs(ps_hosts),
        worker_args=_GetHostsArgs(worker_hosts))
    flattened_results = []
    vm_pid = collections.namedtuple('vm_pid', 'vm pid')
    gpu_type = getattr(benchmark_spec, 'gpu_type', None)
    for model in FLAGS.tf_models:
        for batch_size in _GetBatchSizes(model, gpu_type):
            ps_pids = []
            for task_index, vm in enumerate(ps_hosts):
                dist_ps_args = ('{args} --task_index={index} &\n'
                                'echo {pid} $!').format(args=dist_args,
                                                        index=task_index,
                                                        pid=PID_PREFIX)
                pid = _RunModelOnVm(vm, model, batch_size, benchmark_spec,
                                    dist_ps_args, 'ps')
                ps_pids.append(vm_pid(vm=vm, pid=pid))
            args = []
            for task_index, vm in enumerate(worker_hosts):
                dist_worker_args = ('{args} --job_name=worker '
                                    '--task_index={index}').format(
                                        args=dist_args, index=task_index)
                args.append(((vm, model, batch_size, benchmark_spec,
                              dist_worker_args, 'worker'), {}))
            result = vm_util.RunThreaded(_RunModelOnVm, args)
            for ps_pid in ps_pids:
                ps_pid.vm.RemoteCommand('kill -9 %s' % ps_pid.pid)
            flattened_results.extend(result)
    return flattened_results
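
All of these examples fan work out through vm_util.RunThreaded, which accepts either a list of single arguments or a list of ((args...), {kwargs}) tuples, as the worker loop above shows. Below is a minimal standard-library sketch of that calling convention, not the PerfKit Benchmarker implementation (the real helper adds retries, exception aggregation, and a configurable thread cap):

import concurrent.futures


def run_threaded(target, thread_params, max_workers=None):
    """Sketch of the RunThreaded convention used throughout these examples."""
    def _invoke(params):
        # A ((args...), {kwargs}) tuple is unpacked; anything else is
        # passed through as a single positional argument.
        if (isinstance(params, tuple) and len(params) == 2 and
                isinstance(params[0], tuple) and isinstance(params[1], dict)):
            args, kwargs = params
            return target(*args, **kwargs)
        return target(params)

    with concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
        # Materializing the iterator re-raises the first worker exception.
        return list(pool.map(_invoke, thread_params))


# Both shapes appear in this file:
#   run_threaded(_Install, vms)                       # plain argument list
#   run_threaded(_RunModelOnVm, [((vm, model), {})])  # (args, kwargs) tuples
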
Example #2
    def _RunThreaded(self, vms, **kwargs):
        """Run a single workload using `vms`."""
        target = kwargs.pop('target', None)
        if target is not None:
            target_per_client = target // len(vms)
            targets = [
                target_per_client + (1 if i < (target % len(vms)) else 0)
                for i in range(len(vms))
            ]
        else:
            targets = [target for _ in vms]

        results = []

        if self.shardkeyspace:
            record_count = int(self.workload_meta.get('recordcount', '1000'))
            n_per_client = record_count // len(vms)
            loader_counts = [
                n_per_client + (1 if i < (record_count % len(vms)) else 0)
                for i in range(len(vms))
            ]

        def _Run(loader_index):
            vm = vms[loader_index]
            params = copy.deepcopy(kwargs)
            params['target'] = targets[loader_index]
            if self.perclientparam is not None:
                params.update(self.perclientparam[loader_index])
            if self.shardkeyspace:
                start = sum(loader_counts[:loader_index])
                end = start + loader_counts[loader_index]
                params.update(insertstart=start, recordcount=end)
            results.append(self._Run(vm, **params))
            logging.info('VM %d (%s) finished', loader_index, vm)

        vm_util.RunThreaded(_Run, range(len(vms)))

        if len(results) != len(vms):
            raise IOError('Missing results: only {0}/{1} reported\n{2}'.format(
                len(results), len(vms), results))

        return results
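
The target-splitting arithmetic above distributes a total as evenly as possible, giving the first total % n clients one extra unit. The same pattern, isolated as a standalone helper for clarity:

def split_evenly(total, n):
    # The first (total % n) workers get one extra unit, so counts differ
    # by at most one and always sum back to the original total.
    base, remainder = divmod(total, n)
    return [base + (1 if i < remainder else 0) for i in range(n)]


assert split_evenly(10, 3) == [4, 3, 3]
assert sum(split_evenly(1000, 7)) == 1000
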
Example #3
def RunNtttcp(sending_vm, receiving_vm, receiving_ip_address, ip_type):
    """Run NTttcp and return the samples collected from the run."""

    shared_options = '-xml -t {time} -p {port} '.format(time=FLAGS.ntttcp_time,
                                                        port=BASE_DATA_PORT)

    client_options = '-s -m \'{threads},*,{ip}\''.format(
        threads=FLAGS.ntttcp_threads, ip=receiving_ip_address)
    server_options = '-r -m \'{threads},*,0.0.0.0\''.format(
        threads=FLAGS.ntttcp_threads)

    ntttcp_exe_dir = ntpath.join(sending_vm.temp_dir, 'x86fre')

    # NTttcp will append to the xml file when it runs, which causes parsing
    # to fail if there was a preexisting xml file. To be safe, try deleting
    # the xml file.
    rm_command = 'cd {ntttcp_exe_dir}; rm xml.txt'.format(
        ntttcp_exe_dir=ntttcp_exe_dir)
    sending_vm.RemoteCommand(rm_command,
                             ignore_failure=True,
                             suppress_warning=True)

    def _RunNtttcp(vm, options):
        command = 'cd {ntttcp_exe_dir}; .\\NTttcp.exe {ntttcp_options}'.format(
            ntttcp_exe_dir=ntttcp_exe_dir, ntttcp_options=options)
        vm.RemoteCommand(command)

    args = [((vm, shared_options + options), {}) for vm, options in zip(
        [sending_vm, receiving_vm], [client_options, server_options])]
    vm_util.RunThreaded(_RunNtttcp, args)

    cat_command = 'cd {ntttcp_exe_dir}; cat xml.txt'.format(
        ntttcp_exe_dir=ntttcp_exe_dir)
    stdout, _ = sending_vm.RemoteCommand(cat_command)

    metadata = {'ip_type': ip_type}
    for vm_specifier, vm in (('receiving', receiving_vm),
                             ('sending', sending_vm)):
        for k, v in vm.GetResourceMetadata().items():
            metadata['{0}_{1}'.format(vm_specifier, k)] = v

    return ParseNtttcpResults(stdout, metadata)
Example #4
def Prepare(benchmark_spec):
    """Install MongoDB on one VM and YCSB on another.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    """
    server_partials = [
        functools.partial(_PrepareServer, mongo_vm)
        for mongo_vm in benchmark_spec.vm_groups['workers']
    ]
    client_partials = [
        functools.partial(_PrepareClient, client)
        for client in benchmark_spec.vm_groups['clients']
    ]

    vm_util.RunThreaded((lambda f: f()), server_partials + client_partials)
    benchmark_spec.executor = ycsb.YCSBExecutor('mongodb', cp=ycsb.YCSB_DIR)
    server = benchmark_spec.vm_groups['workers'][0]
    benchmark_spec.mongodb_url = 'mongodb://%s:27017/' % server.internal_ip
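
The functools.partial plus lambda f: f() idiom above is how several of these examples run different setup functions through a single RunThreaded call. A self-contained sketch of that shape (prepare_server and prepare_client are illustrative stand-ins, not benchmark code):

import functools
from concurrent.futures import ThreadPoolExecutor


def prepare_server(host):
    print('server ready:', host)


def prepare_client(host):
    print('client ready:', host)


# Bind each task to its argument up front; the pool then runs the
# zero-argument callables, mirroring RunThreaded(lambda f: f(), partials).
tasks = ([functools.partial(prepare_server, h) for h in ('s1', 's2')] +
         [functools.partial(prepare_client, h) for h in ('c1',)])
with ThreadPoolExecutor() as pool:
    list(pool.map(lambda f: f(), tasks))
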
Example #5
def Prepare(benchmark_spec):
  """Install and build oldisim on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vms = benchmark_spec.vms

  leaf_vms = [vm for vm_idx, vm in enumerate(vms)
              if vm_idx >= (NUM_DRIVERS + NUM_ROOTS)]

  if vms:
    vm_util.RunThreaded(InstallAndBuild, vms)

  # Launch job on the leaf nodes.
  leaf_server_bin = oldisim_dependencies.BinaryPath('LeafNode')
  for vm in leaf_vms:
    leaf_cmd = '%s --threads=%s' % (leaf_server_bin, vm.num_cpus)
    vm.RemoteCommand('%s &> /dev/null &' % leaf_cmd)
Example #6
  def _Create(self):
    """Creates VPN objects for VpnGateway pairs."""

    benchmark_spec = context.GetThreadBenchmarkSpec()
    if benchmark_spec is None:
      raise errors.Error('CreateVPN Service called in a thread without a '
                         'BenchmarkSpec.')

    self.vpn_gateway_pairs = self.GetVpnGatewayPairs(
        benchmark_spec.vpn_gateways)

    for gateway_pair in self.vpn_gateway_pairs:
      # creates the vpn if it doesn't exist and registers in bm_spec.vpns
      suffix = self.GetNewSuffix()
      vpn_id = VPN().getKeyFromGatewayPair(gateway_pair, suffix)
      self.vpns[vpn_id] = VPN().GetVPN(gateway_pair, suffix)
      self.vpns[vpn_id].tunnel_config.setConfig(**self.vpn_properties)

    vm_util.RunThreaded(lambda vpn: self.vpns[vpn].ConfigureTunnel(),
                        list(self.vpns.keys()))
Example #7
def Prepare(benchmark_spec):
    """Prepare the cloud redis instance to YCSB tasks.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
    benchmark_spec.always_call_cleanup = True

    ycsb_vms = benchmark_spec.vm_groups['clients']
    vm_util.RunThreaded(_Install, ycsb_vms)
    instance_details = benchmark_spec.cloud_redis.GetInstanceDetails()
    redis_args = {
        'shardkeyspace': True,
        'redis.host': instance_details['host'],
        'redis.port': instance_details['port']
    }
    if 'password' in instance_details:
        redis_args['redis.password'] = instance_details['password']
    benchmark_spec.executor = ycsb.YCSBExecutor('redis', **redis_args)
Example #8
    def _Create(self):
        """Create an un-managed yarn cluster."""
        logging.info('Should have created vms by now.')
        logging.info(str(self.vms))

        def InstallSpark(vm):
            vm.Install('spark')
            if self.cloud == 'GCP':
                hadoop.InstallGcsConnector(vm)

        if 'worker_group' not in self.vms:
            raise errors.Resource.CreationError(
                'UnmanagedDpbSparkCluster requires VMs in a worker_group.')

        vm_util.RunThreaded(
            InstallSpark, self.vms['worker_group'] + self.vms['master_group'])
        self.leader = self.vms['master_group'][0]
        spark.ConfigureAndStart(self.leader,
                                self.vms['worker_group'],
                                configure_s3=self.cloud == 'AWS')
Example #9
def Prepare(benchmark_spec):
    """Install cloudsuite web search and start the server on all machines.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    """
    vms = benchmark_spec.vms
    servers = benchmark_spec.vm_groups['workers']
    clients = benchmark_spec.vm_groups['clients']

    def PrepareVM(vm):
        vm.Install('wget')
        vm.RemoteCommand('mkdir -p {0}'.format(CLOUDSUITE_WEB_SEARCH_DIR))

    vm_util.RunThreaded(PrepareVM, vms)

    _PrepareSolr(servers)
    _PrepareClient(clients)
    _BuildIndex(servers)
Example #10
def Prepare(bm_spec: _BenchmarkSpec) -> None:
    """Install Redis on one VM and memtier_benchmark on another."""
    server_count = len(bm_spec.vm_groups['servers'])
    if server_count != 1:
        raise errors.Benchmarks.PrepareException(
            f'Expected servers vm count to be 1, got {server_count}')
    client_vms = bm_spec.vm_groups['clients']
    server_vm = bm_spec.vm_groups['servers'][0]

    # Install memtier
    vm_util.RunThreaded(lambda client: client.Install('memtier'),
                        client_vms + [server_vm])

    # Install redis on the 1st machine.
    server_vm.Install('redis_server')
    redis_server.Start(server_vm)
    memtier.Load(server_vm, 'localhost', str(redis_server.DEFAULT_PORT))

    bm_spec.redis_endpoint_ip = bm_spec.vm_groups['servers'][0].internal_ip
    vm_util.SetupSimulatedMaintenance(server_vm)
Example #11
def Run(benchmark_spec):
  """Kick off gartner boot script on launcher server vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    A list of benchmark samples.
  """
  launcher_vms = benchmark_spec.vm_groups['servers']
  vm_util.RunThreaded(
      lambda vm: vm.RemoteCommand('bash {} 2>&1 | tee log'.format(_BOOT_PATH)),
      launcher_vms)
  try:
    _WaitForResponses(launcher_vms)
  except InsufficientBootsError:
    # On really large-scale boots, some failures are expected.
    logging.info('Some VMs failed to boot.')
  return _ParseResult(launcher_vms)
Example #12
    def Delete(self):
        if self.deleted:
            return

        if self.container_registry:
            self.container_registry.Delete()
        if self.spark_service:
            self.spark_service.Delete()
        if self.dpb_service:
            self.dpb_service.Delete()
        if self.managed_relational_db:
            self.managed_relational_db.Delete()
        if self.cloud_tpu:
            self.cloud_tpu.Delete()
        if self.edw_service:
            self.edw_service.Delete()

        if self.vms:
            try:
                vm_util.RunThreaded(self.DeleteVm, self.vms)
            except Exception:
                logging.exception('Got an exception deleting VMs. '
                                  'Attempting to continue tearing down.')

        for firewall in self.firewalls.values():
            try:
                firewall.DisallowAllPorts()
            except Exception:
                logging.exception('Got an exception disabling firewalls. '
                                  'Attempting to continue tearing down.')

        for net in self.networks.values():
            try:
                net.Delete()
            except Exception:
                logging.exception('Got an exception deleting networks. '
                                  'Attempting to continue tearing down.')
        if self.container_cluster:
            self.container_cluster.Delete()

        self.deleted = True
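
Each teardown step above follows the same best-effort shape: try the deletion, log the failure, and keep going so one broken resource cannot strand the rest. Factored out as a sketch (the helper name is illustrative, not from the benchmark code):

import logging


def best_effort(description, fn, *args):
    """Run one teardown step; log and continue on failure."""
    try:
        fn(*args)
    except Exception:  # deliberately broad: teardown should never stop early
        logging.exception('Got an exception %s. '
                          'Attempting to continue tearing down.', description)
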
Example #13
def Run(benchmark_spec):
    """Run netperf on target vms.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    Total throughput, average latency in the form of tuple. The tuple contains
        the sample metric (string), value (float), unit (string).
  """
    vms = benchmark_spec.vms
    num_vms = len(vms)
    results = []
    for netperf_benchmark in NETPERF_BENCHMARKSS:
        metadata = {
            'number_machines': num_vms,
            'number_connections': FLAGS.num_connections
        }

        if netperf_benchmark == 'TCP_STREAM':
            metric = 'TCP_STREAM_Total_Throughput'
            unit = 'Mbits/sec'
            value = 0.0
        else:
            metric = 'TCP_RR_Average_Latency'
            unit = 'ms'
            value = 0.0
        result = [metric, value, unit, metadata]
        args = [((source, netperf_benchmark, vms, result), {})
                for source in vms]
        vm_util.RunThreaded(RunNetperf, args, num_vms)
        result = sample.Sample(*result)
        if netperf_benchmark == 'TCP_RR':
            denom = ((num_vms - 1) * num_vms * FLAGS.num_connections)
            result = result._replace(value=result.value / denom)

        results.append(result)
    logging.info(results)
    return results
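
The TCP_RR division above averages the accumulated latency over every (source, destination) connection: each of the num_vms VMs drives every other VM over FLAGS.num_connections connections. For example:

# With 4 VMs and 8 connections per pair, the summed latency covers
# 4 * 3 * 8 = 96 individual measurements.
num_vms, num_connections = 4, 8
assert (num_vms - 1) * num_vms * num_connections == 96
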
Example #14
def RunHpccSource(
        vms: List[linux_vm.BaseLinuxVirtualMachine]) -> List[sample.Sample]:
    """Returns the parsed output from running the compiled from source HPCC."""
    headnode_vm = vms[0]
    # backup existing HPCC output, if any
    headnode_vm.RemoteCommand(('if [ -f hpccoutf.txt ]; then '
                               'mv hpccoutf.txt hpccoutf-$(date +%s).txt; '
                               'fi'))
    num_processes = len(vms) * headnode_vm.NumCpusForBenchmark()
    run_as_root = '--allow-run-as-root' if FLAGS.mpirun_allow_run_as_root else ''
    mpi_flags = (
        f'-machinefile {MACHINEFILE} --mca orte_rsh_agent '
        f'"ssh -o StrictHostKeyChecking=no" {run_as_root} {_MpiEnv()}')
    mpi_cmd = 'mpirun '
    hpcc_exec = './hpcc'
    if FLAGS.hpcc_math_library == hpcc.HPCC_MATH_LIBRARY_MKL:
        # Must exec HPCC wrapper script to pickup location of libiomp5.so
        vm_util.RunThreaded(_CreateHpccWrapper, vms)
        hpcc_exec = f'./{HPCC_WRAPPER}'

    if FLAGS.hpcc_numa_binding:
        numa_map = numactl.GetNuma(headnode_vm)
        numa_hpcc_cmd = []
        for node, num_cpus in numa_map.items():
            numa_hpcc_cmd.append(f'-np {num_cpus} {mpi_flags} '
                                 f'numactl --cpunodebind {node} '
                                 f'--membind {node} {hpcc_exec}')
        mpi_cmd += ' : '.join(numa_hpcc_cmd)
    else:
        mpi_cmd += f'-np {num_processes} {mpi_flags} {hpcc_exec}'

    headnode_vm.RobustRemoteCommand(mpi_cmd,
                                    timeout=int(FLAGS.hpcc_timeout_hours *
                                                SECONDS_PER_HOUR))
    logging.info('HPCC Results:')
    stdout, _ = headnode_vm.RemoteCommand('cat hpccoutf.txt', should_log=True)
    if stdout.startswith('HPL ERROR'):
        # Annoyingly the mpi_cmd will succeed when there is an HPL error
        raise errors.Benchmarks.RunError(f'Error running HPL: {stdout}')

    return ParseOutput(stdout)
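
The NUMA branch above relies on mpirun's colon-separated multi-block syntax: one -np ... numactl ... block per node, joined with ' : '. A sketch of the composed command for a hypothetical two-node map (all values here are invented for illustration):

numa_map = {0: 16, 1: 16}               # hypothetical: node id -> CPU count
mpi_flags = '-machinefile MACHINEFILE'  # stand-in for the real flag string
blocks = [
    f'-np {cpus} {mpi_flags} numactl --cpunodebind {node} '
    f'--membind {node} ./hpcc'
    for node, cpus in numa_map.items()
]
print('mpirun ' + ' : '.join(blocks))
# mpirun -np 16 ... --cpunodebind 0 --membind 0 ./hpcc : -np 16 ... ./hpcc
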
Example #15
def Prepare(benchmark_spec):
  """Install Redis on one VM and memtier_benchmark on another.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vms = benchmark_spec.vms
  redis_vm = vms[0]
  # Install latest redis on the 1st machine.
  redis_vm.Install('redis_server')
  redis_server.Configure(redis_vm)
  redis_server.Start(redis_vm)

  # Remove snapshotting
  sed_cmd = (r"sed -i -e '/save 900/d' -e '/save 300/d' -e '/save 60/d' -e 's/#"
             "   save \"\"/save \"\"/g' %s/redis.conf")
  redis_vm.RemoteCommand(sed_cmd % redis_server.GetRedisDir())

  args = [((vm,), {}) for vm in vms]
  vm_util.RunThreaded(PrepareLoadgen, args)

  for i in range(GetNumRedisServers(redis_vm)):
    port = FIRST_PORT + i
    redis_vm.RemoteCommand(
        'cp %s/redis.conf %s/redis-%d.conf' %
        (redis_server.GetRedisDir(), redis_server.GetRedisDir(), port))
    redis_vm.RemoteCommand(
        r'sed -i -e "s/port 6379/port %d/g" %s/redis-%d.conf' %
        (port, redis_server.GetRedisDir(), port))
    redis_vm.RemoteCommand(
        'nohup sudo %s/src/redis-server %s/redis-%d.conf &> /dev/null &' %
        (redis_server.GetRedisDir(), redis_server.GetRedisDir(), port))
    # Pre-populate the redis server(s) with data
    redis_vm.RemoteCommand(
        'memtier_benchmark -s localhost -p %d -d %s -t %d -c %d '
        '--ratio 1:0 --key-pattern %s --pipeline %d '
        '--key-minimum %d --key-maximum %d -n allkeys ' %
        (port, FLAGS.memtier_data_size, LOAD_THREAD, LOAD_CLIENT,
         FLAGS.memtier_key_pattern, LOAD_PIPELINE, START_KEY,
         FLAGS.memtier_requests))
Example #16
def Prepare(benchmark_spec):
    """Prepares the cloudharmony network benchmark."""
    # Force cleanup because no standalone VMs are created to trigger normally.
    benchmark_spec.always_call_cleanup = True

    vm_groups = benchmark_spec.vm_groups
    client = vm_groups['client'][0]
    client.Install('cloud_harmony_network')

    # Ignore complaints from using self-signed certificate
    if FLAGS.ch_network_test == 'ssl':
        client.RemoteCommand('echo insecure >> $HOME/.curlrc')

    if FLAGS.ch_network_test_service_type == COMPUTE:
        vm_util.RunThreaded(_PrepareServer, vm_groups['server'])
    elif FLAGS.ch_network_test_service_type == STORAGE:
        _PrepareBucket(benchmark_spec)
    elif FLAGS.ch_network_test_service_type == DNS:
        pass
    else:
        raise NotImplementedError()
Example #17
def Prepare(benchmark_spec):
    """Prepare the virtual machines to run mutilate against memcached.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    """
    clients = benchmark_spec.vm_groups['client']
    master = clients[0]
    server = benchmark_spec.vm_groups['server'][0]
    client_install_fns = [
        functools.partial(vm.Install, 'mutilate') for vm in clients
    ]
    server_install_fns = [
        functools.partial(server.Install, 'memcached_server')
    ]
    vm_util.RunThreaded(lambda f: f(), client_install_fns + server_install_fns)

    memcached_server.ConfigureAndStart(server,
                                       smp_affinity=FLAGS.set_smp_affinity)
    mutilate.Load(master, server.internal_ip, memcached_server.MEMCACHED_PORT)
Example #18
def _RunTf(benchmark_spec):
    """Run TensorFlow for each model specified.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

    Returns:
      A list of sample.Sample objects.
    """
    vms = benchmark_spec.vms
    args = [((vm, benchmark_spec), {}) for vm in vms]
    run_results = vm_util.RunThreaded(_RunOnVm, args)

    # Add vm index to results metadata
    for idx, vm_result in enumerate(run_results):
        for result_sample in vm_result:
            result_sample.metadata['vm_index'] = idx

    # Flatten the list
    return [samples for vm_results in run_results for samples in vm_results]
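
The nested comprehension on the last line is the manual spelling of list flattening; itertools.chain.from_iterable is the equivalent standard-library form:

import itertools

run_results = [['s1', 's2'], ['s3']]  # per-VM sample lists, for illustration
flat = [s for vm_results in run_results for s in vm_results]
assert flat == list(itertools.chain.from_iterable(run_results))
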
Example #19
def Prepare(benchmark_spec):
    """Install Redis on one VM and memtier_benchmark on another.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
    client_vms = benchmark_spec.vm_groups['clients']
    server_vms = benchmark_spec.vm_groups['servers']
    vm_util.RunThreaded(_InstallRedisEnterprise, client_vms + server_vms)

    server_vm = server_vms[0]
    server_vm.AllowPort(REDIS_PORT)
    server_vm.AllowPort(REDIS_UI_PORT)

    redis_enterprise.OfflineCores(server_vms)
    redis_enterprise.CreateCluster(server_vms)
    redis_enterprise.TuneProxy(server_vms)
    redis_enterprise.CreateDatabase(server_vms, REDIS_PORT)
    redis_enterprise.PinWorkers(server_vms)
    redis_enterprise.WaitForDatabaseUp(server_vm, REDIS_PORT)
    redis_enterprise.LoadDatabase(server_vms, client_vms, REDIS_PORT)
Example #20
def Run(benchmark_spec):
    """Runs act and reports the results."""
    vm = benchmark_spec.vms[0]
    act.RunActPrep(vm)
    samples = []
    run_samples = []
    for load in FLAGS.act_load:

        def _Run(act_load, index):
            run_samples.extend(act.RunAct(vm, act_load, index))

        if FLAGS.act_parallel:
            args = [((float(load), idx), {}) for idx in range(
                FLAGS.act_reserved_partitions, len(vm.scratch_disks))]
            vm_util.RunThreaded(_Run, args)
        else:
            run_samples.extend(act.RunAct(vm, float(load)))
        samples.extend(run_samples)
        if FLAGS.act_stop_on_complete and act.IsRunComplete(run_samples):
            break
        run_samples = []
    return samples
Example #21
def _RunIperf3ServerClientPair(sending_vm, sender_args, receiving_vm):
  """Create a server-client iperf3 pair.

  The server exits after the client completes its request.

  Args:
    sending_vm: The client VM that will send the UDP/TCP packets.
    sender_args: the client VM iperf3 args.
    receiving_vm: The server VM that will receive the UDP/TCP packets.

  Returns:
    output from the client iperf3 process.
  """

  iperf3_exec_dir = ntpath.join(sending_vm.temp_dir, IPERF3_DIR)

  def _RunIperf3(vm, options, is_client):
    # to ensure that the server is up before the client, we wait for 1 second
    # when executing the client command
    command = ('cd {iperf3_exec_dir}; '
               'sleep {delay_time}; '
               '.\\iperf3.exe {options}').format(
                   iperf3_exec_dir=iperf3_exec_dir,
                   delay_time=(1 if is_client else 0),
                   options=options)
    vm.RemoteCommand(command)

  receiver_args = '--server -1'

  threaded_args = [((receiving_vm, receiver_args, False), {}),
                   ((sending_vm, sender_args, True), {})]

  vm_util.RunThreaded(_RunIperf3, threaded_args)

  cat_command = 'cd {iperf3_exec_dir}; cat {out_file}'.format(
      iperf3_exec_dir=iperf3_exec_dir, out_file=IPERF3_OUT_FILE)
  command_out, _ = sending_vm.RemoteCommand(cat_command)

  return command_out
Example #22
def Prepare(benchmark_spec):
    """Prepare the cloud redis instance for memtier tasks.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    """
    benchmark_spec.always_call_cleanup = True

    memtier_vms = benchmark_spec.vm_groups['clients']
    vm_util.RunThreaded(_Install, memtier_vms)

    benchmark_spec.cloud_redis_instance = _GetManagedMemoryStore(
        benchmark_spec)
    benchmark_spec.cloud_redis_instance.Create()
    memory_store_ip = benchmark_spec.cloud_redis_instance.GetMemoryStoreIp()
    memory_store_port = (
        benchmark_spec.cloud_redis_instance.GetMemoryStorePort())
    password = benchmark_spec.cloud_redis_instance.GetMemoryStorePassword()

    for vm in memtier_vms:
        memtier.Load(vm, memory_store_ip, memory_store_port, password)
Example #23
    def Delete(self):
        if FLAGS.run_stage not in ['all', 'cleanup'] or self.deleted:
            return

        if self.vms:
            try:
                vm_util.RunThreaded(self.DeleteVm, self.vms)
            except Exception:
                logging.exception('Got an exception deleting VMs. '
                                  'Attempting to continue tearing down.')
        try:
            self.firewall.DisallowAllPorts()
        except Exception:
            logging.exception('Got an exception disabling firewalls. '
                              'Attempting to continue tearing down.')
        for zone in self.networks:
            try:
                self.networks[zone].Delete()
            except Exception:
                logging.exception('Got an exception deleting networks. '
                                  'Attempting to continue tearing down.')
        self.deleted = True
Example #24
def _BuildIndex(solr_nodes):
    """Downloads Solr index and set it up."""
    for vm in solr_nodes:
        vm.RemoteCommand('cd {0} && '
                         'bin/solr stop -p {1}'.format(solr.SOLR_HOME_DIR,
                                                       SOLR_PORT))

    def DownloadIndex(vm):
        solr_core_dir = posixpath.join(vm.GetScratchDir(), 'solr_cores')
        vm.RobustRemoteCommand('cd {0} && '
                               'wget -O - {1} | '
                               'tar zxvf - -C {2}'.format(
                                   solr_core_dir, INDEX_URL,
                                   'cloudsuite_web_search*'))

    vm_util.RunThreaded(DownloadIndex, solr_nodes, len(solr_nodes))
    server_heap_size = FLAGS.cs_websearch_server_heap_size
    for vm in solr_nodes:
        if vm == solr_nodes[0]:
            solr.StartWithZookeeper(vm, SOLR_PORT, server_heap_size, False)
        else:
            solr.Start(vm, SOLR_PORT, solr_nodes[0], SOLR_PORT + 1000,
                       server_heap_size, False)
Example #25
def PinWorkers(vms: List[_VM], proxy_threads: Optional[int] = None) -> None:
    """Splits the Redis worker threads across the NUMA nodes evenly.

    This function is a no-op if --enterprise_redis_pin_workers is not set.

    Args:
      vms: The VMs with the Redis workers to pin.
      proxy_threads: The number of proxy threads per VM.
    """
    if not _PIN_WORKERS.value:
        return

    proxy_threads = proxy_threads or _PROXY_THREADS.value

    def _Pin(vm):
        numa_nodes = vm.CheckLsCpu().numa_node_count
        proxies_per_node = proxy_threads // numa_nodes
        for node in range(numa_nodes):
            node_cpu_list = vm.RemoteCommand(
                'cat /sys/devices/system/node/node%d/cpulist' %
                node)[0].strip()
            # List the PIDs of the Redis worker processes and pin a sliding window of
            # `proxies_per_node` workers to the NUMA nodes in increasing order.
            vm.RemoteCommand(
                r'sudo /opt/redislabs/bin/dmc-cli -ts root list | '
                r'grep worker | '
                r'head -n -{proxies_already_partitioned} | '
                r'tail -n {proxies_per_node} | '
                r"awk '"
                r'{{printf "%i\n",$3}}'
                r"' | "
                r'xargs -i sudo taskset -pc {node_cpu_list} {{}} '.format(
                    proxies_already_partitioned=proxies_per_node * node,
                    proxies_per_node=proxies_per_node,
                    node_cpu_list=node_cpu_list))

    vm_util.RunThreaded(_Pin, vms)
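
The head -n -K | tail -n M pipeline above selects an M-wide window of worker PIDs per node, walking backwards from the end of the listing. The same selection in pure Python, on made-up PIDs:

workers = [101, 102, 103, 104, 105, 106]  # made-up worker PIDs
proxies_per_node = 3
for node in range(2):
    skipped = proxies_per_node * node
    # Equivalent to: head -n -skipped | tail -n proxies_per_node
    window = (workers[:-skipped] if skipped else workers)[-proxies_per_node:]
    print(node, window)  # node 0 -> [104, 105, 106]; node 1 -> [101, 102, 103]
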
Example #26
    def Analyze(self, sender, benchmark_spec, samples):
        """Analyze mpstat file and record samples.

        Args:
          sender: event sender for collecting stats.
          benchmark_spec: benchmark_spec of this run.
          samples: samples to add stats to.
        """
        def _Analyze(role, output):
            """Parse file and record samples."""
            with open(
                    os.path.join(self.output_directory,
                                 os.path.basename(output)), 'r') as fp:
                output = fp.read()
                metadata = {
                    'event': 'mpstat',
                    'sender': 'run',
                    'role': role,
                }
                samples.extend(_MpstatResults(metadata, output))

        vm_util.RunThreaded(_Analyze,
                            [((k, w), {})
                             for k, w in six.iteritems(self._role_mapping)])
Example #27
def Run(benchmark_spec):
    """Run the IOR benchmark on the vms.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

    Returns:
      A list of sample.Sample objects.
    """
    master_vm = benchmark_spec.vms[0]
    results = []
    # Run IOR benchmark.
    if FLAGS.ior_num_procs and FLAGS.ior_script:
        remote_script_path = posixpath.join(
            master_vm.scratch_disks[0].mount_point, FLAGS.ior_script)
        master_vm.PushDataFile(
            FLAGS.ior_script,
            remote_script_path,
            # SCP directly to SMB returns an error, so first copy to disk.
            should_double_copy=(FLAGS.data_disk_type == disk.SMB))
        results += ior.RunIOR(master_vm, FLAGS.ior_num_procs,
                              remote_script_path)

    # Run mdtest benchmark.
    phase_args = ('-C', '-T',
                  '-r') if FLAGS.mdtest_drop_caches else ('-C -T -r', )
    mdtest_args = (
        ' '.join(args)
        for args in itertools.product(FLAGS.mdtest_args, phase_args))
    for args in mdtest_args:
        results += ior.RunMdtest(master_vm, FLAGS.mdtest_num_procs, args)
        if FLAGS.mdtest_drop_caches:
            vm_util.RunThreaded(lambda vm: vm.DropCaches(), benchmark_spec.vms)

    return results
Example #28
def Prepare(benchmark_spec):
    """Install netperf on the target vm.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    """
    vms = benchmark_spec.vms
    vms = vms[:2]
    vm_util.RunThreaded(PrepareNetperf, vms)

    num_streams = max(FLAGS.netperf_num_streams)

    # Start the netserver processes
    if vm_util.ShouldRunOnExternalIpAddress():
        # Open all of the command and data ports
        vms[1].AllowPort(PORT_START, PORT_START + num_streams * 2 - 1)
    netserver_cmd = ('for i in $(seq {port_start} 2 {port_end}); do '
                     '{netserver_path} -p $i & done').format(
                         port_start=PORT_START,
                         port_end=PORT_START + num_streams * 2 - 1,
                         netserver_path=netperf.NETSERVER_PATH)
    vms[1].RemoteCommand(netserver_cmd)

    # Install some stuff on the client vm
    vms[0].Install('pip')
    vms[0].RemoteCommand('sudo pip install python-gflags==2.0')

    # Create a scratch directory for the remote test script
    vms[0].RemoteCommand('sudo mkdir -p /tmp/run/')
    vms[0].RemoteCommand('sudo chmod 777 /tmp/run/')
    # Copy remote test script to client
    path = data.ResourcePath(os.path.join(REMOTE_SCRIPTS_DIR, REMOTE_SCRIPT))
    logging.info('Uploading %s to %s', path, vms[0])
    vms[0].PushFile(path, '/tmp/run/')
    vms[0].RemoteCommand('sudo chmod 777 /tmp/run/%s' % REMOTE_SCRIPT)
Example #29
def Prepare(benchmark_spec):
    """Prepare the virtual machines to run YCSB against Cassandra.

    Args:
      benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    """
    vms = benchmark_spec.vms
    by_role = _GetVMsByRole(benchmark_spec)

    loaders = by_role['clients']
    assert loaders, vms

    # Cassandra cluster
    cassandra_vms = by_role['cassandra_vms']
    assert cassandra_vms, 'No Cassandra VMs: {0}'.format(by_role)
    seed_vm = by_role['seed_vm']
    assert seed_vm, 'No seed VM: {0}'.format(by_role)

    cassandra_install_fns = [
        functools.partial(_InstallCassandra, vm, seed_vms=[seed_vm])
        for vm in cassandra_vms
    ]
    ycsb_install_fns = [
        functools.partial(vm.Install, 'ycsb') for vm in loaders
    ]

    vm_util.RunThreaded(lambda f: f(),
                        cassandra_install_fns + ycsb_install_fns)

    cassandra.StartCluster(seed_vm, by_role['non_seed_cassandra_vms'])

    _CreateYCSBTable(seed_vm)

    benchmark_spec.executor = ycsb.YCSBExecutor(
        'cassandra-10', hosts=','.join(vm.internal_ip for vm in cassandra_vms))
Example #30
def RunCassandraStressTest(cassandra_vms,
                           loader_vms,
                           num_operations,
                           command,
                           profile_operations='insert=1',
                           population_size=None,
                           population_dist=None,
                           population_params=None):
    """Start all loader nodes as Cassandra clients and run stress test.

    Args:
      cassandra_vms: list. A list of vm objects. Cassandra servers.
      loader_vms: list. A list of vm objects. Cassandra clients.
      num_operations: integer. The number of operations cassandra-stress
        clients should issue.
      command: string. The cassandra-stress command to use.
      profile_operations: string. The operations to use with user mode.
      population_size: integer. The population size.
      population_dist: string. The population distribution.
      population_params: string. Additional population parameters.
    """
    num_loaders = len(loader_vms)
    data_node_ips = [vm.internal_ip for vm in cassandra_vms]
    population_size = population_size or num_operations
    operations_per_vm = int(math.ceil(float(num_operations) / num_loaders))
    population_per_vm = population_size // num_loaders
    if num_operations % num_loaders:
        logging.warning(
            'Total number of operations rounded to %s '
            '(%s operations per loader vm).', operations_per_vm * num_loaders,
            operations_per_vm)
    logging.info('Executing the benchmark.')
    args = [((loader_vms[i], i, operations_per_vm, data_node_ips, command,
              profile_operations, population_per_vm, population_dist,
              population_params), {}) for i in range(num_loaders)]
    vm_util.RunThreaded(RunTestOnLoader, args)
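
The per-loader count above rounds up with math.ceil, which is exactly why the warning fires whenever the total is not evenly divisible; a quick check:

import math

num_operations, num_loaders = 1000, 3
operations_per_vm = int(math.ceil(float(num_operations) / num_loaders))
assert operations_per_vm == 334
assert operations_per_vm * num_loaders == 1002  # the rounded total the warning reports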