def dns_server_in_docker_jobspec(grpclb_ips, fallback_ips, shortname,
                                 cause_no_error_no_data_for_balancer_a_record):
    container_name = dockerjob.random_name(shortname)
    run_dns_server_cmdline = [
        'python',
        'test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py',
        '--grpclb_ips=%s' % ','.join(grpclb_ips),
        '--fallback_ips=%s' % ','.join(fallback_ips),
    ]
    if cause_no_error_no_data_for_balancer_a_record:
        run_dns_server_cmdline.append(
            '--cause_no_error_no_data_for_balancer_a_record')
    docker_cmdline = docker_run_cmdline(
        run_dns_server_cmdline,
        cwd='/var/local/git/grpc',
        image=docker_images.get(_FAKE_SERVERS_SAFENAME),
        docker_args=['--name=%s' % container_name])
    jobset.message(
        'IDLE',
        'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
        do_newline=True)
    server_job = jobset.JobSpec(
        cmdline=docker_cmdline, shortname=shortname, timeout_seconds=30 * 60)
    server_job.container_name = container_name
    return server_job
Example 2
def dns_server_in_docker_jobspec(grpclb_ips, fallback_ips, shortname,
                                 cause_no_error_no_data_for_balancer_a_record):
    container_name = dockerjob.random_name(shortname)
    run_dns_server_cmdline = [
        'python',
        'test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py',
        '--grpclb_ips=%s' % ','.join(grpclb_ips),
        '--fallback_ips=%s' % ','.join(fallback_ips),
    ]
    if cause_no_error_no_data_for_balancer_a_record:
        run_dns_server_cmdline.append(
            '--cause_no_error_no_data_for_balancer_a_record')
    docker_cmdline = docker_run_cmdline(
        run_dns_server_cmdline,
        cwd='/var/local/git/grpc',
        image=docker_images.get(_FAKE_SERVERS_SAFENAME),
        docker_args=['--name=%s' % container_name])
    jobset.message('IDLE',
                   'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
                   do_newline=True)
    server_job = jobset.JobSpec(cmdline=docker_cmdline,
                                shortname=shortname,
                                timeout_seconds=30 * 60)
    server_job.container_name = container_name
    return server_job
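
Both variants above build the same DNS-server job; only the formatting differs. A hedged usage sketch follows, assuming the dockerjob.DockerJob wrapper and its ip_address() accessor shown in the run_one_scenario examples further down; the IPs and shortname are placeholders.

# Hedged usage sketch; IPs and shortname are placeholders.
dns_spec = dns_server_in_docker_jobspec(
    grpclb_ips=['10.0.0.10'],
    fallback_ips=['10.0.0.20'],
    shortname='my_scenario_dns_server_0',
    cause_no_error_no_data_for_balancer_a_record=False)
dns_job = dockerjob.DockerJob(dns_spec)  # runs the container in the background
dns_server_ip = dns_job.ip_address()  # the DNS server listens on port 53 here
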
Example 3
def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name,
                                  flame_graph_reports):
    perf_report_jobs = []
    global profile_output_files
    for host_and_port in hosts_and_base_names:
        perf_base_name = hosts_and_base_names[host_and_port]
        output_filename = '%s-%s' % (scenario_name, perf_base_name)
        # from the base filename, create .svg output filename
        host = host_and_port.split(':')[0]
        profile_output_files.append('%s.svg' % output_filename)
        perf_report_jobs.append(
            perf_report_processor_job(host, perf_base_name, output_filename,
                                      flame_graph_reports))

    jobset.message('START',
                   'Collecting perf reports from qps workers',
                   do_newline=True)
    failures, _ = jobset.run(perf_report_jobs,
                             newline_on_success=True,
                             maxjobs=1,
                             clear_alarms=False)
    jobset.message('END',
                   'Collecting perf reports from qps workers',
                   do_newline=True)
    return failures
Example 4
def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name):
  perf_report_jobs = []
  global profile_output_files
  for host_and_port in hosts_and_base_names:
    perf_base_name = hosts_and_base_names[host_and_port]
    output_filename = '%s-%s' % (scenario_name, perf_base_name)
    # from the base filename, create .svg output filename
    host = host_and_port.split(':')[0]
    profile_output_files.append('%s.svg' % output_filename)
    perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename))

  jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
  failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1)
  jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
  return failures
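
As the loops above show, hosts_and_base_names maps each worker's host:port address to the base name of its perf data file. A hedged call sketch against the variant in Example 3 (the one that takes flame_graph_reports); all values are placeholders.

# Placeholder worker addresses and perf base names.
hosts_and_base_names = {
    'worker0.example.com:10000': 'perf-server-0',
    'worker1.example.com:10010': 'perf-client-0',
}
failures = run_collect_perf_profile_jobs(hosts_and_base_names,
                                         scenario_name='qps_unconstrained',
                                         flame_graph_reports='reports_dir')
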
Example 5
def lb_client_interop_jobspec(language,
                              dns_server_ip,
                              docker_image,
                              transport_security='tls'):
    """Runs a gRPC client under test in a docker container"""
    interop_only_options = [
        '--server_host=%s' % _SERVICE_NAME,
        '--server_port=%d' % _FALLBACK_SERVER_PORT
    ] + transport_security_to_args(transport_security)
    # Don't set the server host override in any client;
    # Go and Java default to no override.
    # We're using a DNS server so there's no need.
    if language.safename == 'c++':
        interop_only_options += ['--server_host_override=""']
    # Don't set --use_test_ca; we're configuring
    # clients to use test CAs via alternate means.
    interop_only_options += ['--use_test_ca=false']
    client_args = language.client_cmd(interop_only_options)
    container_name = dockerjob.random_name(
        'lb_interop_client_%s' % language.safename)
    docker_cmdline = docker_run_cmdline(
        client_args,
        environ=language.global_env(),
        image=docker_image,
        cwd=language.client_cwd,
        docker_args=[
            '--dns=%s' % dns_server_ip,
            '--net=host',
            '--name=%s' % container_name,
            '-v',
            '{grpc_grpc_root_dir}:/external_mount:ro'.format(
                grpc_grpc_root_dir=ROOT),
        ])
    jobset.message(
        'IDLE',
        'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
        do_newline=True)
    test_job = jobset.JobSpec(
        cmdline=docker_cmdline,
        shortname=('lb_interop_client:%s' % language),
        timeout_seconds=_TEST_TIMEOUT,
        kill_handler=_job_kill_handler)
    test_job.container_name = container_name
    return test_job
Example 6
def lb_client_interop_jobspec(language,
                              dns_server_ip,
                              docker_image,
                              transport_security='tls'):
    """Runs a gRPC client under test in a docker container"""
    interop_only_options = [
        '--server_host=%s' % _SERVICE_NAME,
        '--server_port=%d' % _FALLBACK_SERVER_PORT
    ] + transport_security_to_args(transport_security)
    # Don't set the server host override in any client;
    # Go and Java default to no override.
    # We're using a DNS server so there's no need.
    if language.safename == 'c++':
        interop_only_options += ['--server_host_override=""']
    # Don't set --use_test_ca; we're configuring
    # clients to use test CAs via alternate means.
    interop_only_options += ['--use_test_ca=false']
    client_args = language.client_cmd(interop_only_options)
    container_name = dockerjob.random_name('lb_interop_client_%s' %
                                           language.safename)
    docker_cmdline = docker_run_cmdline(
        client_args,
        environ=language.global_env(),
        image=docker_image,
        cwd=language.client_cwd,
        docker_args=[
            '--dns=%s' % dns_server_ip,
            '--net=host',
            '--name=%s' % container_name,
            '-v',
            '{grpc_grpc_root_dir}:/external_mount:ro'.format(
                grpc_grpc_root_dir=ROOT),
        ])
    jobset.message('IDLE',
                   'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
                   do_newline=True)
    test_job = jobset.JobSpec(cmdline=docker_cmdline,
                              shortname=('lb_interop_client:%s' % language),
                              timeout_seconds=_TEST_TIMEOUT,
                              kill_handler=_job_kill_handler)
    test_job.container_name = container_name
    return test_job
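
A hedged sketch of creating one client job, mirroring the call in the run_one_scenario examples below; _LANGUAGES and docker_images are the lookup maps those examples assume, and the language name and DNS IP are placeholders.

# Mirrors run_one_scenario below; values are placeholders.
lang = _LANGUAGES['go']
client_job = lb_client_interop_jobspec(
    lang,
    dns_server_ip='172.17.0.2',
    docker_image=docker_images.get(lang.safename),
    transport_security='tls')
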
Example 7
def grpc_server_in_docker_jobspec(server_cmdline, shortname):
    container_name = dockerjob.random_name(shortname)
    environ = {
        'GRPC_GO_LOG_VERBOSITY_LEVEL': '3',
        'GRPC_GO_LOG_SEVERITY_LEVEL': 'INFO',
    }
    docker_cmdline = docker_run_cmdline(
        server_cmdline,
        cwd='/go',
        image=docker_images.get(_FAKE_SERVERS_SAFENAME),
        environ=environ,
        docker_args=['--name=%s' % container_name])
    jobset.message(
        'IDLE',
        'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
        do_newline=True)
    server_job = jobset.JobSpec(
        cmdline=docker_cmdline, shortname=shortname, timeout_seconds=30 * 60)
    server_job.container_name = container_name
    return server_job
Example 8
def grpc_server_in_docker_jobspec(server_cmdline, shortname):
    container_name = dockerjob.random_name(shortname)
    environ = {
        'GRPC_GO_LOG_VERBOSITY_LEVEL': '3',
        'GRPC_GO_LOG_SEVERITY_LEVEL': 'INFO',
    }
    docker_cmdline = docker_run_cmdline(
        server_cmdline,
        cwd='/go',
        image=docker_images.get(_FAKE_SERVERS_SAFENAME),
        environ=environ,
        docker_args=['--name=%s' % container_name])
    jobset.message('IDLE',
                   'docker_cmdline:\b|%s|' % ' '.join(docker_cmdline),
                   do_newline=True)
    server_job = jobset.JobSpec(cmdline=docker_cmdline,
                                shortname=shortname,
                                timeout_seconds=30 * 60)
    server_job.container_name = container_name
    return server_job
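
A hedged launch sketch. The server command line here is a made-up placeholder (note that cwd is /go inside the container); the real fake-server commands come from the image registered under _FAKE_SERVERS_SAFENAME, built in Examples 20 and 21 below.

# Hypothetical server command line; the actual binaries live in the
# fake-servers docker image.
server_spec = grpc_server_in_docker_jobspec(
    server_cmdline=['/go/bin/fake_grpclb', '--port=12000'],
    shortname='my_scenario_grpclb_server_0')
server_job = dockerjob.DockerJob(server_spec)
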
Example 9
def prepare_remote_hosts(hosts, prepare_local=False):
  """Prepares remote hosts (and maybe prepare localhost as well)."""
  prepare_timeout = 5*60
  prepare_jobs = []
  for host in hosts:
    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
    prepare_jobs.append(
        jobset.JobSpec(
            cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
            shortname='remote_host_prepare.%s' % host,
            environ = {'USER_AT_HOST': user_at_host},
            timeout_seconds=prepare_timeout))
  if prepare_local:
    # Prepare localhost as well
    prepare_jobs.append(
        jobset.JobSpec(
            cmdline=['tools/run_tests/performance/kill_workers.sh'],
            shortname='local_prepare',
            timeout_seconds=prepare_timeout))
  jobset.message('START', 'Preparing hosts.', do_newline=True)
  num_failures, _ = jobset.run(
      prepare_jobs, newline_on_success=True, maxjobs=10)
  if num_failures == 0:
    jobset.message('SUCCESS',
                   'Prepare step completed successfully.',
                   do_newline=True)
  else:
    jobset.message('FAILED', 'Failed to prepare remote hosts.',
                   do_newline=True)
    sys.exit(1)
Example 10
def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), build_local=False):
  """Builds performance worker on remote hosts (and maybe also locally)."""
  build_timeout = 15*60
  build_jobs = []
  for host in hosts:
    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
    build_jobs.append(
        jobset.JobSpec(
            cmdline=['tools/run_tests/performance/remote_host_build.sh'] + languages,
            shortname='remote_host_build.%s' % host,
            environ = {'USER_AT_HOST': user_at_host, 'CONFIG': 'opt'},
            timeout_seconds=build_timeout))
  if build_local:
    # Build locally as well
    build_jobs.append(
        jobset.JobSpec(
            cmdline=['tools/run_tests/performance/build_performance.sh'] + languages,
            shortname='local_build',
            environ = {'CONFIG': 'opt'},
            timeout_seconds=build_timeout))
  jobset.message('START', 'Building.', do_newline=True)
  num_failures, _ = jobset.run(
      build_jobs, newline_on_success=True, maxjobs=10)
  if num_failures == 0:
    jobset.message('SUCCESS',
                   'Built successfully.',
                   do_newline=True)
  else:
    jobset.message('FAILED', 'Build failed.',
                   do_newline=True)
    sys.exit(1)
Example 12
def archive_repo(languages):
    """Archives local version of repo including submodules."""
    # Directory contains symlinks that can't be correctly untarred on Windows
    # so we just skip them as a workaround.
    # See https://github.com/grpc/grpc/issues/16334
    bad_symlinks_dir = '../grpc/third_party/libcxx/test/std/experimental/filesystem/Inputs/static_test_env'
    cmdline = [
        'tar', '--exclude', bad_symlinks_dir, '-cf', '../grpc.tar', '../grpc/'
    ]
    if 'java' in languages:
        cmdline.append('../grpc-java')
    if 'go' in languages:
        cmdline.append('../grpc-go')
    if 'node' in languages or 'node_purejs' in languages:
        cmdline.append('../grpc-node')

    archive_job = jobset.JobSpec(
        cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)

    jobset.message('START', 'Archiving local repository.', do_newline=True)
    num_failures, _ = jobset.run(
        [archive_job], newline_on_success=True, maxjobs=1)
    if num_failures == 0:
        jobset.message(
            'SUCCESS',
            'Archive with local repository created successfully.',
            do_newline=True)
    else:
        jobset.message(
            'FAILED', 'Failed to archive local repository.', do_newline=True)
        sys.exit(1)
Example 13
def archive_repo(languages):
    """Archives local version of repo including submodules."""
    cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
    if 'java' in languages:
        cmdline.append('../grpc-java')
    if 'go' in languages:
        cmdline.append('../grpc-go')

    archive_job = jobset.JobSpec(cmdline=cmdline,
                                 shortname='archive_repo',
                                 timeout_seconds=3 * 60)

    jobset.message('START', 'Archiving local repository.', do_newline=True)
    num_failures, _ = jobset.run([archive_job],
                                 newline_on_success=True,
                                 maxjobs=1,
                                 clear_alarms=False)
    if num_failures == 0:
        jobset.message('SUCCESS',
                       'Archive with local repository created successfully.',
                       do_newline=True)
    else:
        jobset.message('FAILED',
                       'Failed to archive local repository.',
                       do_newline=True)
        sys.exit(1)
Example 14
def archive_repo(languages):
    """Archives local version of repo including submodules."""
    # Directory contains symlinks that can't be correctly untarred on Windows
    # so we just skip them as a workaround.
    # See https://github.com/grpc/grpc/issues/16334
    bad_symlinks_dir = '../grpc/third_party/libcxx/test/std/experimental/filesystem/Inputs/static_test_env'
    cmdline = [
        'tar', '--exclude', bad_symlinks_dir, '-cf', '../grpc.tar', '../grpc/'
    ]
    if 'java' in languages:
        cmdline.append('../grpc-java')
    if 'go' in languages:
        cmdline.append('../grpc-go')

    archive_job = jobset.JobSpec(cmdline=cmdline,
                                 shortname='archive_repo',
                                 timeout_seconds=3 * 60)

    jobset.message('START', 'Archiving local repository.', do_newline=True)
    num_failures, _ = jobset.run([archive_job],
                                 newline_on_success=True,
                                 maxjobs=1)
    if num_failures == 0:
        jobset.message('SUCCESS',
                       'Archive with local repository created successfully.',
                       do_newline=True)
    else:
        jobset.message('FAILED',
                       'Failed to archive local repository.',
                       do_newline=True)
        sys.exit(1)
Example 15
def prepare_remote_hosts(hosts, prepare_local=False):
    """Prepares remote hosts (and maybe prepare localhost as well)."""
    prepare_timeout = 5 * 60
    prepare_jobs = []
    for host in hosts:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
        prepare_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
                shortname='remote_host_prepare.%s' % host,
                environ={'USER_AT_HOST': user_at_host},
                timeout_seconds=prepare_timeout))
    if prepare_local:
        # Prepare localhost as well
        prepare_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/kill_workers.sh'],
                shortname='local_prepare',
                timeout_seconds=prepare_timeout))
    jobset.message('START', 'Preparing hosts.', do_newline=True)
    num_failures, _ = jobset.run(prepare_jobs,
                                 newline_on_success=True,
                                 maxjobs=10,
                                 clear_alarms=False)
    if num_failures == 0:
        jobset.message('SUCCESS',
                       'Prepare step completed successfully.',
                       do_newline=True)
    else:
        jobset.message('FAILED',
                       'Failed to prepare remote hosts.',
                       do_newline=True)
        sys.exit(1)
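
A hedged call sketch; the host names are placeholders and _REMOTE_HOST_USERNAME is assumed to be defined by the surrounding script, as in the loop above.

# Placeholder hosts; prepare_local=True also runs kill_workers.sh locally.
prepare_remote_hosts(['grpc-perf-host1', 'grpc-perf-host2'],
                     prepare_local=True)
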
Example 16
def build_on_remote_hosts(hosts,
                          languages=list(scenario_config.LANGUAGES.keys()),
                          build_local=False):
    """Builds performance worker on remote hosts (and maybe also locally)."""
    build_timeout = 45 * 60
    # Kokoro VMs (which are local only) do not have caching, so they need more time to build
    local_build_timeout = 60 * 60
    build_jobs = []
    for host in hosts:
        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
                languages,
                shortname='remote_host_build.%s' % host,
                environ={
                    'USER_AT_HOST': user_at_host,
                    'CONFIG': 'opt'
                },
                timeout_seconds=build_timeout))
    if build_local:
        # start port server locally
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['python', 'tools/run_tests/start_port_server.py'],
                shortname='local_start_port_server',
                timeout_seconds=2 * 60))
        # Build locally as well
        build_jobs.append(
            jobset.JobSpec(
                cmdline=['tools/run_tests/performance/build_performance.sh'] +
                languages,
                shortname='local_build',
                environ={'CONFIG': 'opt'},
                timeout_seconds=local_build_timeout))
    jobset.message('START', 'Building.', do_newline=True)
    num_failures, _ = jobset.run(build_jobs,
                                 newline_on_success=True,
                                 maxjobs=10)
    if num_failures == 0:
        jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
    else:
        jobset.message('FAILED', 'Build failed.', do_newline=True)
        sys.exit(1)
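
A hedged call sketch with placeholder hosts; passing an explicit languages list limits what remote_host_build.sh and build_performance.sh are asked to build.

# Placeholder hosts; language names follow scenario_config.LANGUAGES keys.
build_on_remote_hosts(['grpc-perf-host1', 'grpc-perf-host2'],
                      languages=['c++', 'go'],
                      build_local=True)
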
Example 17
def archive_repo(languages):
    """Archives local version of repo including submodules."""
    cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
    if 'java' in languages:
        cmdline.append('../grpc-java')
    if 'go' in languages:
        cmdline.append('../grpc-go')

    archive_job = jobset.JobSpec(
        cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)

    jobset.message('START', 'Archiving local repository.', do_newline=True)
    num_failures, _ = jobset.run(
        [archive_job], newline_on_success=True, maxjobs=1)
    if num_failures == 0:
        jobset.message(
            'SUCCESS',
            'Archive with local repository created successfully.',
            do_newline=True)
    else:
        jobset.message(
            'FAILED', 'Failed to archive local repository.', do_newline=True)
        sys.exit(1)
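
All archive_repo variants run tar with paths relative to the parent of the grpc checkout, so sibling checkouts such as ../grpc-java must already exist for the requested languages. For instance:

# With languages=['java', 'go'], the variant above assembles roughly:
#   tar -cf ../grpc.tar ../grpc/ ../grpc-java ../grpc-go
archive_repo(['java', 'go'])
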
Example 18
    extra_args.append('%s' % args.runs_per_test)
    extra_args.append('--quiet_success')
  if args.max_time > 0:
    extra_args.extend(('--max_time', '%d' % args.max_time))

  all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
             _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)

  jobs = []
  for job in all_jobs:
    if not args.filter or all(filter in job.labels for filter in args.filter):
      if not any(exclude_label in job.labels for exclude_label in args.exclude):
        jobs.append(job)

  if not jobs:
    jobset.message('FAILED', 'No test suites match given criteria.',
                   do_newline=True)
    sys.exit(1)

  print('IMPORTANT: The changes you are testing need to be locally committed')
  print('because only the committed changes in the current branch will be')
  print('copied to the docker environment or into subworkspaces.')

  skipped_jobs = []

  if args.filter_pr_tests:
    print('Looking for irrelevant tests to skip...')
    relevant_jobs = filter_tests(jobs, args.base_branch)
    if len(relevant_jobs) == len(jobs):
      print('No tests will be skipped.')
    else:
      print('These tests will be skipped:')
Example 19
    targets += _BUILD_MAP[label]

# Among targets selected by -b, filter out those that don't match the filter
targets = [t for t in targets if all(f in t.labels for f in args.filter)]
targets = sorted(set(targets), key=lambda target: target.name)

# Execute pre-build phase
prebuild_jobs = []
for target in targets:
    prebuild_jobs += target.pre_build_jobspecs()
if prebuild_jobs:
    num_failures, _ = jobset.run(prebuild_jobs,
                                 newline_on_success=True,
                                 maxjobs=args.jobs)
    if num_failures != 0:
        jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
        sys.exit(1)

build_jobs = []
for target in targets:
    build_jobs.append(target.build_jobspec())
if not build_jobs:
    print('Nothing to build.')
    sys.exit(1)

jobset.message('START', 'Building targets.', do_newline=True)
num_failures, resultset = jobset.run(build_jobs,
                                     newline_on_success=True,
                                     maxjobs=args.jobs)
report_utils.render_junit_xml_report(resultset,
                                     args.xml_report,
Example 20
        build_jobs.append(job)
        docker_images[str(l.safename)] = job.tag

# First check if a pre-built image was supplied.
if args.servers_image_tag:
    docker_images[_FAKE_SERVERS_SAFENAME] = args.servers_image_tag
else:
    # Build the test servers in docker and save the fully
    # built image.
    job = build_interop_image_jobspec(_FAKE_SERVERS_SAFENAME,
                                      basename_prefix='lb_interop')
    build_jobs.append(job)
    docker_images[_FAKE_SERVERS_SAFENAME] = job.tag

if build_jobs:
    jobset.message('START', 'Building interop docker images.', do_newline=True)
    print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
    num_failures, _ = jobset.run(build_jobs,
                                 newline_on_success=True,
                                 maxjobs=args.jobs)
    if num_failures == 0:
        jobset.message('SUCCESS',
                       'All docker images built successfully.',
                       do_newline=True)
    else:
        jobset.message('FAILED',
                       'Failed to build interop docker images.',
                       do_newline=True)
        sys.exit(1)

Example 21
        build_jobs.append(job)
        docker_images[str(l.safename)] = job.tag

# First check if a pre-built image was supplied.
if args.servers_image_tag:
    docker_images[_FAKE_SERVERS_SAFENAME] = args.servers_image_tag
else:
    # Build the test servers in docker and save the fully
    # built image.
    job = build_interop_image_jobspec(
        _FAKE_SERVERS_SAFENAME, basename_prefix='lb_interop')
    build_jobs.append(job)
    docker_images[_FAKE_SERVERS_SAFENAME] = job.tag

if build_jobs:
    jobset.message('START', 'Building interop docker images.', do_newline=True)
    print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
    num_failures, _ = jobset.run(
        build_jobs, newline_on_success=True, maxjobs=args.jobs)
    if num_failures == 0:
        jobset.message(
            'SUCCESS', 'All docker images built successfully.', do_newline=True)
    else:
        jobset.message(
            'FAILED', 'Failed to build interop docker images.', do_newline=True)
        sys.exit(1)


def wait_until_dns_server_is_up(dns_server_ip):
    """Probes the DNS server until it's running and safe for tests."""
    for i in range(0, 30):
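
The excerpt above cuts off inside the probe loop. Purely as an illustration (not the original body), a minimal probe could retry a TCP connection to port 53 of the DNS container, which is where the comments in run_one_scenario below say the server listens.

import socket
import time

def wait_until_dns_server_is_up_sketch(dns_server_ip):
    """Illustrative stand-in for the truncated probe loop above."""
    for i in range(0, 30):
        sock = None
        try:
            # Try a TCP connect to port 53 of the DNS container's IP.
            sock = socket.create_connection((dns_server_ip, 53), timeout=1)
            return
        except socket.error:
            time.sleep(1)
        finally:
            if sock:
                sock.close()
    raise Exception('DNS server did not come up within 30 seconds')
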
Example 22
def run_one_scenario(scenario_config):
    jobset.message('START', 'Run scenario: %s' % scenario_config['name'])
    server_jobs = {}
    server_addresses = {}
    suppress_server_logs = True
    try:
        backend_addrs = []
        fallback_ips = []
        grpclb_ips = []
        shortname_prefix = scenario_config['name']
        # Start backends
        for i in xrange(len(scenario_config['backend_configs'])):
            backend_config = scenario_config['backend_configs'][i]
            backend_shortname = shortname(shortname_prefix, 'backend_server', i)
            backend_spec = backend_server_jobspec(
                backend_config['transport_sec'], backend_shortname)
            backend_job = dockerjob.DockerJob(backend_spec)
            server_jobs[backend_shortname] = backend_job
            backend_addrs.append('%s:%d' % (backend_job.ip_address(),
                                            _BACKEND_SERVER_PORT))
        # Start fallbacks
        for i in xrange(len(scenario_config['fallback_configs'])):
            fallback_config = scenario_config['fallback_configs'][i]
            fallback_shortname = shortname(shortname_prefix, 'fallback_server',
                                           i)
            fallback_spec = fallback_server_jobspec(
                fallback_config['transport_sec'], fallback_shortname)
            fallback_job = dockerjob.DockerJob(fallback_spec)
            server_jobs[fallback_shortname] = fallback_job
            fallback_ips.append(fallback_job.ip_address())
        # Start balancers
        for i in xrange(len(scenario_config['balancer_configs'])):
            balancer_config = scenario_config['balancer_configs'][i]
            grpclb_shortname = shortname(shortname_prefix, 'grpclb_server', i)
            grpclb_spec = grpclb_jobspec(balancer_config['transport_sec'],
                                         balancer_config['short_stream'],
                                         backend_addrs, grpclb_shortname)
            grpclb_job = dockerjob.DockerJob(grpclb_spec)
            server_jobs[grpclb_shortname] = grpclb_job
            grpclb_ips.append(grpclb_job.ip_address())
        # Start DNS server
        dns_server_shortname = shortname(shortname_prefix, 'dns_server', 0)
        dns_server_spec = dns_server_in_docker_jobspec(
            grpclb_ips, fallback_ips, dns_server_shortname,
            scenario_config['cause_no_error_no_data_for_balancer_a_record'])
        dns_server_job = dockerjob.DockerJob(dns_server_spec)
        server_jobs[dns_server_shortname] = dns_server_job
        # Get the IP address of the docker container running the DNS server.
        # The DNS server is running on port 53 of that IP address. Note we will
        # point the DNS resolvers of grpc clients under test to our controlled
        # DNS server by effectively modifying the /etc/resolv.conf "nameserver"
        # lists of their docker containers.
        dns_server_ip = dns_server_job.ip_address()
        wait_until_dns_server_is_up(dns_server_ip)
        # Run clients
        jobs = []
        for lang_name in languages:
            # Skip languages that are known to not currently
            # work for this test.
            if not args.no_skips and lang_name in scenario_config.get(
                    'skip_langs', []):
                jobset.message('IDLE',
                               'Skipping scenario: %s for language: %s\n' %
                               (scenario_config['name'], lang_name))
                continue
            lang = _LANGUAGES[lang_name]
            test_job = lb_client_interop_jobspec(
                lang,
                dns_server_ip,
                docker_image=docker_images.get(lang.safename),
                transport_security=scenario_config['transport_sec'])
            jobs.append(test_job)
        jobset.message('IDLE', 'Jobs to run: \n%s\n' % '\n'.join(
            str(job) for job in jobs))
        num_failures, resultset = jobset.run(
            jobs, newline_on_success=True, maxjobs=args.jobs)
        report_utils.render_junit_xml_report(resultset, 'sponge_log.xml')
        if num_failures:
            suppress_server_logs = False
            jobset.message(
                'FAILED',
                'Scenario: %s. Some tests failed' % scenario_config['name'],
                do_newline=True)
        else:
            jobset.message(
                'SUCCESS',
                'Scenario: %s. All tests passed' % scenario_config['name'],
                do_newline=True)
        return num_failures
    finally:
        # Check if servers are still running.
        for server, job in server_jobs.items():
            if not job.is_running():
                print('Server "%s" has exited prematurely.' % server)
        suppress_failure = suppress_server_logs and not args.verbose
        dockerjob.finish_jobs(
            [j for j in six.itervalues(server_jobs)],
            suppress_failure=suppress_failure)
Example 23
def run_one_scenario(scenario_config):
    jobset.message('START', 'Run scenario: %s' % scenario_config['name'])
    server_jobs = {}
    server_addresses = {}
    suppress_server_logs = True
    try:
        backend_addrs = []
        fallback_ips = []
        grpclb_ips = []
        shortname_prefix = scenario_config['name']
        # Start backends
        for i in xrange(len(scenario_config['backend_configs'])):
            backend_config = scenario_config['backend_configs'][i]
            backend_shortname = shortname(shortname_prefix, 'backend_server',
                                          i)
            backend_spec = backend_server_jobspec(
                backend_config['transport_sec'], backend_shortname)
            backend_job = dockerjob.DockerJob(backend_spec)
            server_jobs[backend_shortname] = backend_job
            backend_addrs.append(
                '%s:%d' % (backend_job.ip_address(), _BACKEND_SERVER_PORT))
        # Start fallbacks
        for i in xrange(len(scenario_config['fallback_configs'])):
            fallback_config = scenario_config['fallback_configs'][i]
            fallback_shortname = shortname(shortname_prefix, 'fallback_server',
                                           i)
            fallback_spec = fallback_server_jobspec(
                fallback_config['transport_sec'], fallback_shortname)
            fallback_job = dockerjob.DockerJob(fallback_spec)
            server_jobs[fallback_shortname] = fallback_job
            fallback_ips.append(fallback_job.ip_address())
        # Start balancers
        for i in xrange(len(scenario_config['balancer_configs'])):
            balancer_config = scenario_config['balancer_configs'][i]
            grpclb_shortname = shortname(shortname_prefix, 'grpclb_server', i)
            grpclb_spec = grpclb_jobspec(balancer_config['transport_sec'],
                                         balancer_config['short_stream'],
                                         backend_addrs, grpclb_shortname)
            grpclb_job = dockerjob.DockerJob(grpclb_spec)
            server_jobs[grpclb_shortname] = grpclb_job
            grpclb_ips.append(grpclb_job.ip_address())
        # Start DNS server
        dns_server_shortname = shortname(shortname_prefix, 'dns_server', 0)
        dns_server_spec = dns_server_in_docker_jobspec(
            grpclb_ips, fallback_ips, dns_server_shortname,
            scenario_config['cause_no_error_no_data_for_balancer_a_record'])
        dns_server_job = dockerjob.DockerJob(dns_server_spec)
        server_jobs[dns_server_shortname] = dns_server_job
        # Get the IP address of the docker container running the DNS server.
        # The DNS server is running on port 53 of that IP address. Note we will
        # point the DNS resolvers of grpc clients under test to our controlled
        # DNS server by effectively modifying the /etc/resolv.conf "nameserver"
        # lists of their docker containers.
        dns_server_ip = dns_server_job.ip_address()
        wait_until_dns_server_is_up(dns_server_ip)
        # Run clients
        jobs = []
        for lang_name in languages:
            # Skip languages that are known to not currently
            # work for this test.
            if not args.no_skips and lang_name in scenario_config.get(
                    'skip_langs', []):
                jobset.message(
                    'IDLE', 'Skipping scenario: %s for language: %s\n' %
                    (scenario_config['name'], lang_name))
                continue
            lang = _LANGUAGES[lang_name]
            test_job = lb_client_interop_jobspec(
                lang,
                dns_server_ip,
                docker_image=docker_images.get(lang.safename),
                transport_security=scenario_config['transport_sec'])
            jobs.append(test_job)
        jobset.message(
            'IDLE',
            'Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
        num_failures, resultset = jobset.run(jobs,
                                             newline_on_success=True,
                                             maxjobs=args.jobs)
        report_utils.render_junit_xml_report(resultset, 'sponge_log.xml')
        if num_failures:
            suppress_server_logs = False
            jobset.message('FAILED',
                           'Scenario: %s. Some tests failed' %
                           scenario_config['name'],
                           do_newline=True)
        else:
            jobset.message('SUCCESS',
                           'Scenario: %s. All tests passed' %
                           scenario_config['name'],
                           do_newline=True)
        return num_failures
    finally:
        # Check if servers are still running.
        for server, job in server_jobs.items():
            if not job.is_running():
                print('Server "%s" has exited prematurely.' % server)
        suppress_failure = suppress_server_logs and not args.verbose
        dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)],
                              suppress_failure=suppress_failure)
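
Both run_one_scenario variants read the same keys from scenario_config; the dict shape below is inferred from those lookups, with placeholder values throughout.

# Shape inferred from run_one_scenario's lookups; values are placeholders.
example_scenario = {
    'name': 'example_all_tls',
    'transport_sec': 'tls',
    'backend_configs': [{'transport_sec': 'tls'}],
    'fallback_configs': [{'transport_sec': 'tls'}],
    'balancer_configs': [{'transport_sec': 'tls', 'short_stream': False}],
    'cause_no_error_no_data_for_balancer_a_record': False,
    'skip_langs': [],
}
num_failures = run_one_scenario(example_scenario)
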
Example 24
    # languages for which to build docker images
    languages_to_build = set(_LANGUAGES[k]
                             for k in set([str(l) for l in languages] +
                                          [s for s in servers]))
    if args.http2_interop:
        languages_to_build.add(http2Interop)

    build_jobs = []
    for l in languages_to_build:
        job = build_interop_image_jobspec(l)
        docker_images[str(l)] = job.tag
        build_jobs.append(job)

    if build_jobs:
        jobset.message('START',
                       'Building interop docker images.',
                       do_newline=True)
        num_failures, _ = jobset.run(build_jobs,
                                     newline_on_success=True,
                                     maxjobs=args.jobs)
        if num_failures == 0:
            jobset.message('SUCCESS',
                           'All docker images built successfully.',
                           do_newline=True)
        else:
            jobset.message('FAILED',
                           'Failed to build interop docker images.',
                           do_newline=True)
            for image in docker_images.itervalues():
                dockerjob.remove_image(image, skip_nonexistent=True)
            sys.exit(1)
Example 25
        extra_args.append('--quiet_success')

    all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
               _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)

    jobs = []
    for job in all_jobs:
        if not args.filter or all(filter in job.labels
                                  for filter in args.filter):
            if not any(exclude_label in job.labels
                       for exclude_label in args.exclude):
                jobs.append(job)

    if not jobs:
        jobset.message('FAILED',
                       'No test suites match given criteria.',
                       do_newline=True)
        sys.exit(1)

    print(
        'IMPORTANT: The changes you are testing need to be locally committed')
    print('because only the committed changes in the current branch will be')
    print('copied to the docker environment or into subworkspaces.')

    skipped_jobs = []

    if args.filter_pr_tests:
        print('Looking for irrelevant tests to skip...')
        relevant_jobs = filter_tests(jobs, args.base_branch)
        if len(relevant_jobs) == len(jobs):
            print('No tests will be skipped.')
Example 26
for label in args.build:
  targets += _BUILD_MAP[label]

# Among targets selected by -b, filter out those that don't match the filter
targets = [t for t in targets if all(f in t.labels for f in args.filter)]
targets = sorted(set(targets))

# Execute pre-build phase
prebuild_jobs = []
for target in targets:
  prebuild_jobs += target.pre_build_jobspecs()
if prebuild_jobs:
  num_failures, _ = jobset.run(
    prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
  if num_failures != 0:
    jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
    sys.exit(1)

build_jobs = []
for target in targets:
  build_jobs.append(target.build_jobspec())
if not build_jobs:
  print('Nothing to build.')
  sys.exit(1)

jobset.message('START', 'Building targets.', do_newline=True)
num_failures, _ = jobset.run(
    build_jobs, newline_on_success=True, maxjobs=args.jobs)
if num_failures == 0:
  jobset.message('SUCCESS', 'All targets built successfully.',
                 do_newline=True)
Example 27
    languages_to_build.add(http2Interop)

  if args.http2_server_interop:
    languages_to_build.add(http2InteropServer)

  build_jobs = []
  for l in languages_to_build:
    if str(l) == 'objc':
      # we don't need to build a docker image for objc
      continue
    job = build_interop_image_jobspec(l)
    docker_images[str(l)] = job.tag
    build_jobs.append(job)

  if build_jobs:
    jobset.message('START', 'Building interop docker images.', do_newline=True)
    if args.verbose:
      print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))

    num_failures, _ = jobset.run(
        build_jobs, newline_on_success=True, maxjobs=args.jobs)
    if num_failures == 0:
      jobset.message('SUCCESS', 'All docker images built successfully.',
                     do_newline=True)
    else:
      jobset.message('FAILED', 'Failed to build interop docker images.',
                     do_newline=True)
      for image in six.itervalues(docker_images):
        dockerjob.remove_image(image, skip_nonexistent=True)
      sys.exit(1)