Example #1
def create_docker_jobspec(name,
                          dockerfile_dir,
                          shell_command,
                          environ={},
                          flake_retries=0,
                          timeout_retries=0):
    """Creates jobspec for a task running under docker."""
    environ = environ.copy()
    environ['RUN_COMMAND'] = shell_command

    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    docker_env = {
        'DOCKERFILE_DIR': dockerfile_dir,
        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
        'OUTPUT_DIR': 'artifacts'
    }
    jobspec = jobset.JobSpec(
        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
        docker_args,
        environ=docker_env,
        shortname='build_package.%s' % (name),
        timeout_seconds=30 * 60,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries)
    return jobspec
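A hedged usage sketch: the package name, dockerfile directory, and shell command below are invented for illustration, and jobset.run is invoked the same way the archive_repo examples later in this list invoke it.

job = create_docker_jobspec(
    'linux_x64',                                 # hypothetical package name
    'tools/dockerfile/grpc_artifact_linux_x64',  # hypothetical dockerfile dir
    'bash build_package.sh')                     # hypothetical build command
num_failures, _ = jobset.run([job], newline_on_success=True, maxjobs=1)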
Example #2
def _workspace_jobspec(name,
                       runtests_args=[],
                       workspace_name=None,
                       runtests_envs={},
                       inner_jobs=_DEFAULT_INNER_JOBS,
                       timeout_seconds=None):
    """Run a single instance of run_tests.py in a separate workspace"""
    if not workspace_name:
        workspace_name = 'workspace_%s' % name
    if not timeout_seconds:
        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
    shortname = 'run_tests_%s' % name
    env = {'WORKSPACE_NAME': workspace_name}
    env.update(runtests_envs)
    # if a report base dir is set, we don't need '..' to step out of the workspace dir
    report_dir_prefix = '' if os.getenv('GRPC_TEST_REPORT_BASE_DIR',
                                        None) else '../'
    test_job = jobset.JobSpec(cmdline=[
        'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
        '-t', '-j',
        str(inner_jobs), '-x',
        '%srun_tests/%s' %
        (report_dir_prefix, _report_filename(name)), '--report_suite_name',
        '%s' % _safe_report_name(name)
    ] + runtests_args,
                              environ=env,
                              shortname=shortname,
                              timeout_seconds=timeout_seconds,
                              logfilename=_matrix_job_logfilename(shortname))
    return test_job
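The helpers _report_filename and _safe_report_name are defined elsewhere in the same script; a plausible sketch, consistent with how this example uses them (the exact report file name is an assumption):

def _safe_report_name(name):
    # '+' in a suite name tends to render badly in report viewers
    return name.replace('+', 'p')

def _report_filename(name):
    # one report directory per suite, with a fixed XML file name inside it
    return '%s/%s' % (_safe_report_name(name), 'sponge_log.xml')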
Example #3
def create_scenario_jobspec(scenario_json,
                            workers,
                            remote_host=None,
                            bq_result_table=None,
                            server_cpu_load=0):
    """Runs one scenario using QPS driver."""
    # setting QPS_WORKERS env variable here makes sure it works with SSH too.
    cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
    if bq_result_table:
        cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
    cmd += 'tools/run_tests/performance/run_qps_driver.sh '
    cmd += '--scenarios_json=%s ' % pipes.quote(
        json.dumps({
            'scenarios': [scenario_json]
        }))
    cmd += '--scenario_result_file=scenario_result.json '
    if server_cpu_load != 0:
        cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
    if remote_host:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
        # The quote placement is deliberate: the local shell concatenates
        # "cd ... && " with the pipes.quote()d command into one ssh argument.
        cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
            user_at_host, pipes.quote(cmd))

    return jobset.JobSpec(
        cmdline=[cmd],
        shortname='%s' % scenario_json['name'],
        timeout_seconds=_SCENARIO_TIMEOUT,
        shell=True,
        verbose_success=True)
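For reference, pipes.quote (an alias of shlex.quote on Python 3) single-quotes the embedded JSON so the driver receives it as a single argument; the scenario name below is made up:

import json
import pipes

scenario_json = {'name': 'example_scenario'}  # hypothetical scenario
print('--scenarios_json=%s' %
      pipes.quote(json.dumps({'scenarios': [scenario_json]})))
# prints: --scenarios_json='{"scenarios": [{"name": "example_scenario"}]}'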
Example #4
def archive_repo(languages):
    """Archives local version of repo including submodules."""
    # Directory contains symlinks that can't be correctly untarred on Windows
    # so we just skip them as a workaround.
    # See https://github.com/grpc/grpc/issues/16334
    bad_symlinks_dir = '../grpc/third_party/libcxx/test/std/experimental/filesystem/Inputs/static_test_env'
    cmdline = [
        'tar', '--exclude', bad_symlinks_dir, '-cf', '../grpc.tar', '../grpc/'
    ]
    if 'java' in languages:
        cmdline.append('../grpc-java')
    if 'go' in languages:
        cmdline.append('../grpc-go')

    archive_job = jobset.JobSpec(cmdline=cmdline,
                                 shortname='archive_repo',
                                 timeout_seconds=3 * 60)

    jobset.message('START', 'Archiving local repository.', do_newline=True)
    num_failures, _ = jobset.run([archive_job],
                                 newline_on_success=True,
                                 maxjobs=1)
    if num_failures == 0:
        jobset.message('SUCCESS',
                       'Archive with local repository created successfully.',
                       do_newline=True)
    else:
        jobset.message('FAILED',
                       'Failed to archive local repository.',
                       do_newline=True)
        sys.exit(1)
Example #5
def create_netperf_jobspec(server_host='localhost',
                           client_host=None,
                           bq_result_table=None):
    """Runs netperf benchmark."""
    cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
    if bq_result_table:
        cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
    if client_host:
        # If netperf is running remotely, the env variables populated by Jenkins
        # won't be available on the client, but we need them for uploading results
        # to BigQuery.
        jenkins_job_name = os.getenv('JOB_NAME')
        if jenkins_job_name:
            cmd += 'JOB_NAME="%s" ' % jenkins_job_name
        jenkins_build_number = os.getenv('BUILD_NUMBER')
        if jenkins_build_number:
            cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number

    cmd += 'tools/run_tests/performance/run_netperf.sh'
    if client_host:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
        cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
            user_at_host, pipes.quote(cmd))

    return jobset.JobSpec(
        cmdline=[cmd],
        shortname='netperf',
        timeout_seconds=_NETPERF_TIMEOUT,
        shell=True,
        verbose_success=True)
Example #6
def create_docker_jobspec(name,
                          dockerfile_dir,
                          shell_command,
                          environ={},
                          flake_retries=0,
                          timeout_retries=0,
                          timeout_seconds=30 * 60,
                          extra_docker_args=None,
                          verbose_success=False):
    """Creates jobspec for a task running under docker."""
    environ = environ.copy()
    environ['RUN_COMMAND'] = shell_command
    environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name

    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    docker_env = {
        'DOCKERFILE_DIR': dockerfile_dir,
        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
        'OUTPUT_DIR': 'artifacts'
    }
    if extra_docker_args is not None:
        docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
    jobspec = jobset.JobSpec(
        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
        docker_args,
        environ=docker_env,
        shortname='build_artifact.%s' % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
        verbose_success=verbose_success)
    return jobspec
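A hedged call sketch for the extra_docker_args path; the artifact name, dockerfile directory, command, and --privileged flag are illustrative only:

job = create_docker_jobspec(
    'protoc_linux_x64',                       # hypothetical artifact name
    'tools/dockerfile/grpc_artifact_protoc',  # hypothetical dockerfile dir
    'bash build_protoc.sh',
    timeout_seconds=60 * 60,
    extra_docker_args='--privileged')  # forwarded via EXTRA_DOCKER_ARGS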
Example #7
def archive_repo(languages):
    """Archives local version of repo including submodules."""
    cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
    if 'java' in languages:
        cmdline.append('../grpc-java')
    if 'go' in languages:
        cmdline.append('../grpc-go')
    if 'node' in languages or 'node_purejs' in languages:
        cmdline.append('../grpc-node')

    archive_job = jobset.JobSpec(
        cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)

    jobset.message('START', 'Archiving local repository.', do_newline=True)
    num_failures, _ = jobset.run(
        [archive_job], newline_on_success=True, maxjobs=1)
    if num_failures == 0:
        jobset.message(
            'SUCCESS',
            'Archive with local repository created successfully.',
            do_newline=True)
    else:
        jobset.message(
            'FAILED', 'Failed to archive local repository.', do_newline=True)
        sys.exit(1)
Example #8
def create_jobspec(name,
                   cmdline,
                   environ={},
                   shell=False,
                   flake_retries=0,
                   timeout_retries=0,
                   timeout_seconds=30 * 60,
                   use_workspace=False,
                   cpu_cost=1.0,
                   verbose_success=False):
    """Creates jobspec."""
    environ = environ.copy()
    if use_workspace:
        environ['WORKSPACE_NAME'] = 'workspace_%s' % name
        environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
        cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
                   ] + cmdline
    else:
        environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)

    jobspec = jobset.JobSpec(cmdline=cmdline,
                             environ=environ,
                             shortname='build_artifact.%s' % (name),
                             timeout_seconds=timeout_seconds,
                             flake_retries=flake_retries,
                             timeout_retries=timeout_retries,
                             shell=shell,
                             cpu_cost=cpu_cost,
                             verbose_success=verbose_success)
    return jobspec
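With use_workspace=True the command line gets wrapped and artifacts land one directory up; a sketch with an invented artifact name and script:

job = create_jobspec(
    'python_linux_x64',  # hypothetical artifact name
    ['bash', 'tools/run_tests/artifacts/build_artifact_python.sh'],
    use_workspace=True)
# the job now runs under run_in_workspace.sh with
# ARTIFACTS_OUT=../artifacts/python_linux_x64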
Example #9
def create_docker_jobspec(name,
                          dockerfile_dir,
                          shell_command,
                          environ={},
                          flake_retries=0,
                          timeout_retries=0,
                          copy_rel_path=None,
                          timeout_seconds=30 * 60):
    """Creates jobspec for a task running under docker."""
    environ = environ.copy()
    environ['RUN_COMMAND'] = shell_command
    # the entire repo will be cloned if copy_rel_path is not set.
    if copy_rel_path:
        environ['RELATIVE_COPY_PATH'] = copy_rel_path

    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    docker_env = {
        'DOCKERFILE_DIR': dockerfile_dir,
        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
    }
    jobspec = jobset.JobSpec(
        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
        docker_args,
        environ=docker_env,
        shortname='distribtest.%s' % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries)
    return jobspec
Example #10
def _workspace_jobspec(name,
                       runtests_args=[],
                       workspace_name=None,
                       runtests_envs={},
                       inner_jobs=_DEFAULT_INNER_JOBS,
                       timeout_seconds=None):
    """Run a single instance of run_tests.py in a separate workspace"""
    if not workspace_name:
        workspace_name = 'workspace_%s' % name
    if not timeout_seconds:
        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
    shortname = 'run_tests_%s' % name
    env = {'WORKSPACE_NAME': workspace_name}
    env.update(runtests_envs)
    test_job = jobset.JobSpec(cmdline=[
        'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
        '-t', '-j',
        str(inner_jobs), '-x',
        '../run_tests/%s' % _report_filename(name), '--report_suite_name',
        '%s' % _safe_report_name(name)
    ] + runtests_args,
                              environ=env,
                              shortname=shortname,
                              timeout_seconds=timeout_seconds,
                              logfilename=_matrix_job_logfilename(shortname))
    return test_job
Example #11
def create_qpsworker_job(language,
                         shortname=None,
                         port=10000,
                         remote_host=None,
                         perf_cmd=None):
    cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])

    if remote_host:
        host_and_port = '%s:%s' % (remote_host, port)
    else:
        host_and_port = 'localhost:%s' % port

    perf_file_base_name = None
    if perf_cmd:
        perf_file_base_name = '%s-%s' % (host_and_port, shortname)
        # specify -o output file so perf.data gets collected when the worker is stopped
        cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name
                              ] + cmdline

    if remote_host:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
        ssh_cmd = ['ssh']
        ssh_cmd.extend([
            str(user_at_host),
            'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)
        ])
        cmdline = ssh_cmd

    jobspec = jobset.JobSpec(
        cmdline=cmdline,
        shortname=shortname,
        timeout_seconds=5 * 60,  # workers get restarted after each scenario
        verbose_success=True)
    return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
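QpsWorkerJob is defined elsewhere in the same script; a minimal stand-in consistent with the constructor call above (the real class presumably also tracks the spawned job):

import collections

QpsWorkerJob = collections.namedtuple(
    'QpsWorkerJob',
    ['jobspec', 'language', 'host_and_port', 'perf_file_base_name'])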
Example #12
def dns_server_in_docker_jobspec(grpclb_ips, fallback_ips, shortname,
                                 cause_no_error_no_data_for_balancer_a_record):
    container_name = dockerjob.random_name(shortname)
    run_dns_server_cmdline = [
        'python',
        'test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py',
        '--grpclb_ips=%s' % ','.join(grpclb_ips),
        '--fallback_ips=%s' % ','.join(fallback_ips),
    ]
    if cause_no_error_no_data_for_balancer_a_record:
        run_dns_server_cmdline.append(
            '--cause_no_error_no_data_for_balancer_a_record')
    docker_cmdline = docker_run_cmdline(
        run_dns_server_cmdline,
        cwd='/var/local/git/grpc',
        image=docker_images.get(_FAKE_SERVERS_SAFENAME),
        docker_args=['--name=%s' % container_name])
    jobset.message('IDLE',
                   'docker_cmdline: |%s|' % ' '.join(docker_cmdline),
                   do_newline=True)
    server_job = jobset.JobSpec(cmdline=docker_cmdline,
                                shortname=shortname,
                                timeout_seconds=30 * 60)
    server_job.container_name = container_name
    return server_job
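docker_run_cmdline is another helper from the same script; a plausible sketch of the command line it assembles, inferred from its call sites here:

def docker_run_cmdline(cmdline, image, docker_args=None, cwd=None,
                       environ=None):
    # build a full 'docker run' invocation (sketch, not the verbatim helper)
    docker_cmdline = ['docker', 'run', '-i', '--rm=true']
    if environ:
        for k, v in environ.items():
            docker_cmdline += ['-e', '%s=%s' % (k, v)]
    if cwd:
        docker_cmdline += ['-w', cwd]
    docker_cmdline += (docker_args or []) + [image] + cmdline
    return docker_cmdline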
Example #13
def server_jobspec(language,
                   docker_image,
                   insecure=False,
                   manual_cmd_log=None):
    """Create jobspec for running a server"""
    container_name = dockerjob.random_name('interop_server_%s' %
                                           language.safename)
    cmdline = bash_cmdline(
        language.server_cmd([
            '--port=%s' % _DEFAULT_SERVER_PORT,
            '--use_tls=%s' % ('false' if insecure else 'true')
        ]))
    environ = language.global_env()
    docker_args = ['--name=%s' % container_name]
    if language.safename == 'http2':
        # we are running the http2 interop server. Open next N ports beginning
        # with the server port. These ports are used for http2 interop test
        # (one test case per port).
        docker_args += list(
            itertools.chain.from_iterable(
                ('-p', str(_DEFAULT_SERVER_PORT + i))
                for i in range(len(_HTTP2_SERVER_TEST_CASES))))
        # Enable docker's healthcheck mechanism.
        # This runs a Python script inside the container every second. The script
        # pings the http2 server to verify it is ready. The 'health-retries' flag
        # specifies the number of consecutive failures before docker will report
        # the container's status as 'unhealthy'. Prior to the first 'health_retries'
        # failures or the first success, the status will be 'starting'. 'docker ps'
        # or 'docker inspect' can be used to see the health of the container on the
        # command line.
        docker_args += [
            '--health-cmd=python test/http2_test/http2_server_health_check.py '
            '--server_host=%s --server_port=%d' %
            ('localhost', _DEFAULT_SERVER_PORT),
            '--health-interval=1s',
            '--health-retries=5',
            '--health-timeout=10s',
        ]

    else:
        docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]

    docker_cmdline = docker_run_cmdline(cmdline,
                                        image=docker_image,
                                        cwd=language.server_cwd,
                                        environ=environ,
                                        docker_args=docker_args)
    if manual_cmd_log is not None:
        if manual_cmd_log == []:
            manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
                                  docker_image)
        manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
    server_job = jobset.JobSpec(cmdline=docker_cmdline,
                                environ=environ,
                                shortname='interop_server_%s' % language,
                                timeout_seconds=30 * 60)
    server_job.container_name = container_name
    return server_job
Example #14
def collect_perf(bm_name, args):
  """generate flamegraphs"""
  heading('Flamegraphs: %s' % bm_name)
  subprocess.check_call(
      ['make', bm_name,
       'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
  benchmarks = []
  profile_analysis = []
  cleanup = []
  # universal_newlines=True keeps benchmark names as str under Python 3
  for line in subprocess.check_output(
      ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests'],
      universal_newlines=True).splitlines():
    link(line, '%s.svg' % fnize(line))
    benchmarks.append(
        jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
                        '-g', '-F', '997',
                        'bins/mutrace/%s' % bm_name,
                        '--benchmark_filter=^%s$' % line,
                        '--benchmark_min_time=10']))
    profile_analysis.append(
        jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
                       environ = {
                           'PERF_BASE_NAME': fnize(line),
                           'OUTPUT_DIR': 'reports',
                           'OUTPUT_FILENAME': fnize(line),
                       }))
    cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
    cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
    # periodically flush out the list of jobs: temporary space required for this
    # processing is large
    if len(benchmarks) >= 20:
      # run benchmarks one at a time: concurrent perf record sessions would
      # contend for profiling resources and skew each other's samples
      jobset.run(benchmarks, maxjobs=1,
                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
      benchmarks = []
      profile_analysis = []
      cleanup = []
  # run the remaining benchmarks that weren't flushed
  if len(benchmarks):
    jobset.run(benchmarks, maxjobs=1,
               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
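fnize maps a benchmark name to a file-name-safe token; it is defined elsewhere in the profiling scripts. A plausible sketch consistent with its use above:

def fnize(s):
    # collapse characters that are awkward in file names into single underscores
    out = ''
    for c in s:
        if c in '<>, /':
            if out and out[-1] == '_':
                continue
            out += '_'
        else:
            out += c
    return out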
Example #15
def cloud_to_cloud_jobspec(language,
                           test_case,
                           server_name,
                           server_host,
                           server_port,
                           docker_image=None,
                           insecure=False):
    """Creates jobspec for cloud-to-cloud interop test"""
    interop_only_options = [
        '--server_host_override=foo.test.google.fr',
        '--use_tls=%s' % ('false' if insecure else 'true'),
        '--use_test_ca=true',
    ]
    common_options = [
        '--test_case=%s' % test_case,
        '--server_host=%s' % server_host,
    ]
    if test_case in _HTTP2_BADSERVER_TEST_CASES:
        # We are running the http2_badserver_interop test. Adjust command line accordingly.
        offset = sorted(_HTTP2_BADSERVER_TEST_CASES).index(test_case)
        client_options = common_options + [
            '--server_port=%s' % (int(server_port) + offset)
        ]
        cmdline = bash_cmdline(
            language.client_cmd_http2interop(client_options))
        cwd = language.http2_cwd
    else:
        client_options = interop_only_options + common_options + [
            '--server_port=%s' % server_port
        ]
        cmdline = bash_cmdline(language.client_cmd(client_options))
        cwd = language.client_cwd

    environ = language.global_env()
    if docker_image:
        container_name = dockerjob.random_name('interop_client_%s' %
                                               language.safename)
        cmdline = docker_run_cmdline(
            cmdline,
            image=docker_image,
            environ=environ,
            cwd=cwd,
            docker_args=['--net=host', '--name', container_name])
        cwd = None

    test_job = jobset.JobSpec(cmdline=cmdline,
                              cwd=cwd,
                              environ=environ,
                              shortname='cloud_to_cloud:%s:%s_server:%s' %
                              (language, server_name, test_case),
                              timeout_seconds=_TEST_TIMEOUT,
                              flake_retries=5 if args.allow_flakes else 0,
                              timeout_retries=2 if args.allow_flakes else 0,
                              kill_handler=_job_kill_handler)
    if docker_image:
        test_job.container_name = container_name
    return test_job
Example #16
def cloud_to_prod_jobspec(language,
                          test_case,
                          server_host_nickname,
                          server_host,
                          docker_image=None,
                          auth=False,
                          manual_cmd_log=None,
                          service_account_key_file=None):
    """Creates jobspec for cloud-to-prod interop test"""
    container_name = None
    cmdargs = [
        '--server_host=%s' % server_host,
        '--server_host_override=%s' % server_host, '--server_port=443',
        '--use_tls=true',
        '--test_case=%s' % test_case
    ]
    environ = dict(language.cloud_to_prod_env(), **language.global_env())
    if auth:
        auth_cmdargs, auth_env = auth_options(language, test_case,
                                              service_account_key_file)
        cmdargs += auth_cmdargs
        environ.update(auth_env)
    cmdline = bash_cmdline(language.client_cmd(cmdargs))
    cwd = language.client_cwd

    if docker_image:
        container_name = dockerjob.random_name('interop_client_%s' %
                                               language.safename)
        cmdline = docker_run_cmdline(
            cmdline,
            image=docker_image,
            cwd=cwd,
            environ=environ,
            docker_args=['--net=host',
                         '--name=%s' % container_name])
        if manual_cmd_log is not None:
            if manual_cmd_log == []:
                manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
                                      docker_image)
            manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
        cwd = None
        environ = None

    suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod'
    test_job = jobset.JobSpec(
        cmdline=cmdline,
        cwd=cwd,
        environ=environ,
        shortname='%s:%s:%s:%s' %
        (suite_name, language, server_host_nickname, test_case),
        timeout_seconds=_TEST_TIMEOUT,
        flake_retries=4 if args.allow_flakes else 0,
        timeout_retries=2 if args.allow_flakes else 0,
        kill_handler=_job_kill_handler)
    if docker_image:
        test_job.container_name = container_name
    return test_job
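bash_cmdline (and the bash_login_cmdline variant used in Example #29) wraps an argument list for execution through bash; a minimal sketch inferred from the call sites:

def bash_cmdline(cmdline):
    # run the joined command through bash so shell lookup and expansion apply
    return ['bash', '-c', ' '.join(cmdline)]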
Example #17
def build_on_remote_hosts(hosts,
                          languages=list(scenario_config.LANGUAGES.keys()),
                          build_local=False):
    """Builds performance worker on remote hosts (and maybe also locally)."""
    build_timeout = 45 * 60
    # Kokoro VMs (which are local only) do not have caching, so they need more time to build
    local_build_timeout = 60 * 60
    build_jobs = []
    for host in hosts:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
                languages,
                shortname='remote_host_build.%s' % host,
                environ={
                    'USER_AT_HOST': user_at_host,
                    'CONFIG': 'opt'
                },
                timeout_seconds=build_timeout))
    if build_local:
        # start port server locally
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['python', 'tools/run_tests/start_port_server.py'],
                shortname='local_start_port_server',
                timeout_seconds=2 * 60))
        # Build locally as well
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/build_performance.sh'] +
                languages,
                shortname='local_build',
                environ={'CONFIG': 'opt'},
                timeout_seconds=local_build_timeout))
    jobset.message('START', 'Building.', do_newline=True)
    num_failures, _ = jobset.run(build_jobs,
                                 newline_on_success=True,
                                 maxjobs=10)
    if num_failures == 0:
        jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
    else:
        jobset.message('FAILED', 'Build failed.', do_newline=True)
        sys.exit(1)
Example #18
def collect_latency(bm_name, args):
  """generate latency profiles"""
  benchmarks = []
  profile_analysis = []
  cleanup = []

  heading('Latency Profiles: %s' % bm_name)
  subprocess.check_call(
      ['make', bm_name,
       'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
  # universal_newlines=True keeps benchmark names as str under Python 3
  for line in subprocess.check_output(
      ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests'],
      universal_newlines=True).splitlines():
    link(line, '%s.txt' % fnize(line))
    benchmarks.append(
        jobset.JobSpec(['bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' % line],
                       environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
    profile_analysis.append(
        jobset.JobSpec([sys.executable,
                        'tools/profiling/latency_profile/profile_analyzer.py',
                        '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
                        '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
    cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
    # periodically flush out the list of jobs: profile_analysis jobs at least
    # consume upwards of five gigabytes of ram in some cases, and so analysing
    # hundreds of them at once is impractical -- but we want at least some
    # concurrency or the work takes too long
    if len(benchmarks) >= min(4, multiprocessing.cpu_count()):
      # run up to half the cpu count: each benchmark can use up to two cores
      # (one for the microbenchmark, one for the data flush)
      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2),
                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
      benchmarks = []
      profile_analysis = []
      cleanup = []
  # run the remaining benchmarks that weren't flushed
  if len(benchmarks):
    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2),
               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
Example #19
def _docker_jobspec(name, runtests_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
    """Run a single instance of run_tests.py in a docker container"""
    test_job = jobset.JobSpec(cmdline=[
        'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t', '-j',
        str(inner_jobs), '-x',
        'report_%s_%s' % (name, _REPORT_SUFFIX), '--report_suite_name',
        '%s' % name
    ] + runtests_args,
                              shortname='run_tests_%s' % name,
                              timeout_seconds=_RUNTESTS_TIMEOUT)
    return test_job
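A hedged invocation sketch; the matrix entry name is invented, while -l/--language and -c/--config are real run_tests.py flags:

job = _docker_jobspec('c_dbg',  # hypothetical matrix entry name
                      runtests_args=['-l', 'c', '-c', 'dbg'])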
Example #20
def build_interop_stress_image_jobspec(language, tag=None):
  """Creates jobspec for building stress test docker image for a language"""
  if not tag:
    tag = 'grpc_interop_stress_%s:%s' % (language.safename, uuid.uuid4())
  env = {'INTEROP_IMAGE': tag,
         'BASE_NAME': 'grpc_interop_stress_%s' % language.safename}
  build_job = jobset.JobSpec(cmdline=['tools/run_tests/dockerize/build_interop_stress_image.sh'],
                             environ=env,
                             shortname='build_docker_%s' % (language),
                             timeout_seconds=30 * 60)
  build_job.tag = tag
  return build_job
Example #21
def create_jobspec(name, cmdline, environ=None, shell=False,
                   flake_retries=0, timeout_retries=0):
  """Creates jobspec."""
  jobspec = jobset.JobSpec(
          cmdline=cmdline,
          environ=environ,
          shortname='distribtest.%s' % (name),
          timeout_seconds=10*60,
          flake_retries=flake_retries,
          timeout_retries=timeout_retries,
          shell=shell)
  return jobspec
Example #22
def server_jobspec(language,
                   docker_image,
                   insecure=False,
                   manual_cmd_log=None):
    """Create jobspec for running a server"""
    container_name = dockerjob.random_name('interop_server_%s' %
                                           language.safename)
    cmdline = bash_cmdline(
        language.server_cmd([
            '--port=%s' % _DEFAULT_SERVER_PORT,
            '--use_tls=%s' % ('false' if insecure else 'true')
        ]))
    environ = language.global_env()
    if language.safename == 'http2':
        # we are running the http2 interop server. Open next N ports beginning
        # with the server port. These ports are used for http2 interop test
        # (one test case per port). We also attach the docker container running
        # the server to the host network, so we don't have to mess with port mapping.
        port_args = [
            '-p',
            str(_DEFAULT_SERVER_PORT + 0),
            '-p',
            str(_DEFAULT_SERVER_PORT + 1),
            '-p',
            str(_DEFAULT_SERVER_PORT + 2),
            '-p',
            str(_DEFAULT_SERVER_PORT + 3),
            '-p',
            str(_DEFAULT_SERVER_PORT + 4),
            '-p',
            str(_DEFAULT_SERVER_PORT + 5),
            '-p',
            str(_DEFAULT_SERVER_PORT + 6),
            '--net=host',
        ]
    else:
        port_args = ['-p', str(_DEFAULT_SERVER_PORT)]

    docker_cmdline = docker_run_cmdline(cmdline,
                                        image=docker_image,
                                        cwd=language.server_cwd,
                                        environ=environ,
                                        docker_args=port_args +
                                        ['--name=%s' % container_name])
    if manual_cmd_log is not None:
        manual_cmd_log.append(manual_cmdline(docker_cmdline))
    server_job = jobset.JobSpec(cmdline=docker_cmdline,
                                environ=environ,
                                shortname='interop_server_%s' % language,
                                timeout_seconds=30 * 60)
    server_job.container_name = container_name
    return server_job
Example #23
def create_quit_jobspec(workers, remote_host=None):
  """Runs quit using QPS driver."""
  # setting QPS_WORKERS env variable here makes sure it works with SSH too.
  cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(w.host_and_port for w in workers)
  if remote_host:
    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
    cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))

  return jobset.JobSpec(
      cmdline=[cmd],
      shortname='qps_json_driver.quit',
      timeout_seconds=3*60,
      shell=True,
      verbose_success=True)
Example #24
def build_interop_image_jobspec(lang_safename, basename_prefix='grpc_interop'):
    """Creates jobspec for building interop docker image for a language"""
    tag = '%s_%s:%s' % (basename_prefix, lang_safename, uuid.uuid4())
    env = {
        'INTEROP_IMAGE': tag,
        'BASE_NAME': '%s_%s' % (basename_prefix, lang_safename),
    }
    build_job = jobset.JobSpec(
        cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
        environ=env,
        shortname='build_docker_%s' % lang_safename,
        timeout_seconds=30 * 60)
    build_job.tag = tag
    return build_job
Example #25
def build_on_remote_hosts(hosts,
                          languages=list(scenario_config.LANGUAGES.keys()),
                          build_local=False):
    """Builds performance worker on remote hosts (and maybe also locally)."""
    build_timeout = 15 * 60
    build_jobs = []
    for host in hosts:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
                languages,
                shortname='remote_host_build.%s' % host,
                environ={
                    'USER_AT_HOST': user_at_host,
                    'CONFIG': 'opt'
                },
                timeout_seconds=build_timeout))
    if build_local:
        # Build locally as well
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/build_performance.sh'] +
                languages,
                shortname='local_build',
                environ={'CONFIG': 'opt'},
                timeout_seconds=build_timeout))
    jobset.message('START', 'Building.', do_newline=True)
    num_failures, _ = jobset.run(build_jobs,
                                 newline_on_success=True,
                                 maxjobs=10,
                                 clear_alarms=False)
    if num_failures == 0:
        jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
    else:
        jobset.message('FAILED', 'Build failed.', do_newline=True)
        sys.exit(1)
Example #26
def _workspace_jobspec(name, runtests_args=[], workspace_name=None, inner_jobs=_DEFAULT_INNER_JOBS):
  """Run a single instance of run_tests.py in a separate workspace"""
  if not workspace_name:
    workspace_name = 'workspace_%s' % name
  env = {'WORKSPACE_NAME': workspace_name}
  test_job = jobset.JobSpec(
          cmdline=['bash',
                   'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
                   '-t',
                   '-j', str(inner_jobs),
                   '-x', '../report_%s.xml' % name,
                   '--report_suite_name', '%s' % name] + runtests_args,
          environ=env,
          shortname='run_tests_%s' % name,
          timeout_seconds=_RUNTESTS_TIMEOUT)
  return test_job
Example #27
def _docker_jobspec(name, runtests_args=[], runtests_envs={},
                    inner_jobs=_DEFAULT_INNER_JOBS,
                    timeout_seconds=None):
  """Run a single instance of run_tests.py in a docker container"""
  if not timeout_seconds:
    timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
  test_job = jobset.JobSpec(
          cmdline=['python', 'tools/run_tests/run_tests.py',
                   '--use_docker',
                   '-t',
                   '-j', str(inner_jobs),
                   '-x', _report_filename(name),
                   '--report_suite_name', '%s' % name] + runtests_args,
          environ=runtests_envs,
          shortname='run_tests_%s' % name,
          timeout_seconds=timeout_seconds)
  return test_job
Example #28
def perf_report_processor_job(worker_host, perf_base_name, output_filename,
                              flame_graph_reports):
    print('Creating perf report collection job for %s' % worker_host)
    cmd = ''
    if worker_host != 'localhost':
        user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
        cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s tools/run_tests/performance/process_remote_perf_flamegraphs.sh" % (
            user_at_host, output_filename, flame_graph_reports, perf_base_name)
    else:
        cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s tools/run_tests/performance/process_local_perf_flamegraphs.sh" % (
            output_filename, flame_graph_reports, perf_base_name)

    return jobset.JobSpec(cmdline=cmd,
                          timeout_seconds=3 * 60,
                          shell=True,
                          verbose_success=True,
                          shortname='process perf report')
Example #29
def cloud_to_cloud_jobspec(language,
                           test_cases,
                           server_addresses,
                           test_duration_secs,
                           num_channels_per_server,
                           num_stubs_per_channel,
                           metrics_port,
                           docker_image=None):
    """Creates jobspec for cloud-to-cloud interop test"""
    cmdline = bash_login_cmdline(
        language.client_cmd([
            '--test_cases=%s' % test_cases,
            '--server_addresses=%s' % server_addresses,
            '--test_duration_secs=%s' % test_duration_secs,
            '--num_stubs_per_channel=%s' % num_stubs_per_channel,
            '--num_channels_per_server=%s' % num_channels_per_server,
            '--metrics_port=%s' % metrics_port
        ]))
    print(cmdline)
    cwd = language.client_cwd
    environ = language.global_env()
    if docker_image:
        container_name = dockerjob.random_name('interop_client_%s' %
                                               language.safename)
        cmdline = docker_run_cmdline(
            cmdline,
            image=docker_image,
            environ=environ,
            cwd=cwd,
            docker_args=['--net=host', '--name', container_name])
        cwd = None

    test_job = jobset.JobSpec(
        cmdline=cmdline,
        cwd=cwd,
        environ=environ,
        shortname='cloud_to_cloud:%s:%s_server:stress_test' %
        (language, server_addresses),
        timeout_seconds=test_duration_secs * 2,
        flake_retries=0,
        timeout_retries=0,
        kill_handler=_job_kill_handler)
    if docker_image:
        test_job.container_name = container_name
    return test_job
Example #30
def create_jobspec(name,
                   cmdline,
                   environ=None,
                   cwd=None,
                   shell=False,
                   flake_retries=0,
                   timeout_retries=0,
                   cpu_cost=1.0):
    """Creates jobspec."""
    jobspec = jobset.JobSpec(cmdline=cmdline,
                             environ=environ,
                             cwd=cwd,
                             shortname='build_package.%s' % (name),
                             timeout_seconds=10 * 60,
                             flake_retries=flake_retries,
                             timeout_retries=timeout_retries,
                             cpu_cost=cpu_cost,
                             shell=shell)
    return jobspec