# Example 1
def main() -> None:
    """Generate load test configs from a YAML template and emit them as YAML.

    Parses command-line options, applies template substitutions to the
    template file given by --template, and generates one LoadTest config per
    (scenario, language, cross-language pairing, run) combination.  The
    resulting multi-document YAML stream is written to --output, or to
    stdout when --output is not set.
    """
    language_choices = sorted(scenario_config.LANGUAGES.keys())
    argp = argparse.ArgumentParser(
        description='Generates load test configs from a template.',
        fromfile_prefix_chars='@')
    argp.add_argument('-l',
                      '--language',
                      action='append',
                      choices=language_choices,
                      required=True,
                      help='Language(s) to benchmark.',
                      dest='languages')
    argp.add_argument('-t',
                      '--template',
                      type=str,
                      required=True,
                      help='LoadTest configuration yaml file template.')
    argp.add_argument('-s',
                      '--substitution',
                      action='append',
                      default=[],
                      help='Template substitution(s), in the form key=value.',
                      dest='substitutions')
    argp.add_argument('-p',
                      '--prefix',
                      default='',
                      type=str,
                      help='Test name prefix.')
    argp.add_argument('-u',
                      '--uniquifier_element',
                      action='append',
                      default=[],
                      help='String element(s) to make the test name unique.',
                      dest='uniquifier_elements')
    argp.add_argument(
        '-d',
        action='store_true',
        help='Use creation date and time as an additional uniquifier element.')
    argp.add_argument('-a',
                      '--annotation',
                      action='append',
                      default=[],
                      help='metadata.annotation(s), in the form key=value.',
                      dest='annotations')
    argp.add_argument('-r',
                      '--regex',
                      default='.*',
                      type=str,
                      help='Regex to select scenarios to run.')
    argp.add_argument(
        '--category',
        choices=['all', 'inproc', 'scalable', 'smoketest', 'sweep'],
        default='all',
        help='Select a category of tests to run.')
    argp.add_argument(
        '--allow_client_language',
        action='append',
        choices=language_choices,
        default=[],
        help='Allow cross-language scenarios with this client language.',
        dest='allow_client_languages')
    argp.add_argument(
        '--allow_server_language',
        action='append',
        choices=language_choices,
        default=[],
        help='Allow cross-language scenarios with this server language.',
        dest='allow_server_languages')
    argp.add_argument('--instances_per_client',
                      default=1,
                      type=int,
                      help="Number of instances to generate for each client.")
    argp.add_argument('--runs_per_test',
                      default=1,
                      type=int,
                      help='Number of copies to generate for each test.')
    argp.add_argument('-o',
                      '--output',
                      type=str,
                      help='Output file name. Output to stdout if not set.')
    args = argp.parse_args()

    # Counts of zero or less would silently generate no (or broken) configs;
    # reject them with a usage error.
    if args.instances_per_client < 1:
        argp.error('instances_per_client must be greater than zero.')

    if args.runs_per_test < 1:
        argp.error('runs_per_test must be greater than zero.')

    # Config generation ignores environment variables that are passed by the
    # controller at runtime, by substituting them with themselves.
    substitutions = {
        'DRIVER_PORT': '${DRIVER_PORT}',
        'KILL_AFTER': '${KILL_AFTER}',
        'POD_TIMEOUT': '${POD_TIMEOUT}',
    }

    # The user can override the ignored variables above by passing them in as
    # substitution keys.
    substitutions.update(parse_key_value_args(args.substitutions))

    uniquifier_elements = args.uniquifier_elements
    if args.d:
        # -d appends the creation timestamp as an extra uniquifier element.
        uniquifier_elements.append(now_string())

    annotations = parse_key_value_args(args.annotations)

    with open(args.template) as f:
        base_config = yaml.safe_load(
            string.Template(f.read()).substitute(substitutions))

    clear_empty_fields(base_config)

    # Pull clients and servers out of the template spec: they serve as
    # per-scenario templates for gen_loadtest_configs, not literal entries.
    spec = base_config['spec']
    base_config_clients = spec['clients']
    del spec['clients']
    base_config_servers = spec['servers']
    del spec['servers']

    # The empty string stands for "no cross-language override" on each side.
    client_languages = [''] + args.allow_client_languages
    server_languages = [''] + args.allow_server_languages
    config_generators = []
    for language, client_language, server_language in itertools.product(
            args.languages, client_languages, server_languages):
        language_config = scenario_config_exporter.LanguageConfig(
            category=args.category,
            language=language,
            client_language=client_language,
            server_language=server_language)
        config_generators.append(
            gen_loadtest_configs(
                base_config,
                base_config_clients,
                base_config_servers,
                args.regex,
                language_config,
                loadtest_name_prefix=args.prefix,
                uniquifier_elements=uniquifier_elements,
                annotations=annotations,
                instances_per_client=args.instances_per_client,
                runs_per_test=args.runs_per_test))
    # chain() is already a lazy iterator; no genexp wrapper needed.
    configs = itertools.chain(*config_generators)

    def _dump(stream) -> None:
        # Write all generated configs as a multi-document YAML stream.
        yaml.dump_all(configs,
                      stream=stream,
                      Dumper=config_dumper(
                          CONFIGURATION_FILE_HEADER_COMMENT.strip()),
                      default_flow_style=False)

    # BUG FIX: the original `with open(...) if args.output else sys.stdout`
    # closed sys.stdout on exit when no output file was given. Only the
    # user-supplied file is managed (and closed) here; stdout is left open.
    if args.output:
        with open(args.output, 'w') as f:
            _dump(f)
    else:
        _dump(sys.stdout)
# Example 2
def main() -> None:
    """Generate load test configs from a YAML template and emit them as YAML.

    Parses command-line options, applies template substitutions to the
    template file given by --template, and generates one LoadTest config per
    (scenario, language, cross-language pairing, run) combination.  The
    resulting multi-document YAML stream is written to --output, or to
    stdout when --output is not set.
    """
    language_choices = sorted(scenario_config.LANGUAGES.keys())
    argp = argparse.ArgumentParser(
        description='Generates load test configs from a template.',
        fromfile_prefix_chars='@')
    argp.add_argument('-l',
                      '--language',
                      action='append',
                      choices=language_choices,
                      required=True,
                      help='Language(s) to benchmark.',
                      dest='languages')
    argp.add_argument('-t',
                      '--template',
                      type=str,
                      required=True,
                      help='LoadTest configuration yaml file template.')
    argp.add_argument('-s',
                      '--substitution',
                      action='append',
                      default=[],
                      help='Template substitution(s), in the form key=value.',
                      dest='substitutions')
    argp.add_argument('-p',
                      '--prefix',
                      default='',
                      type=str,
                      help='Test name prefix.')
    argp.add_argument('-u',
                      '--uniquifier_element',
                      action='append',
                      default=[],
                      help='String element(s) to make the test name unique.',
                      dest='uniquifier_elements')
    # Typo fix in help text: "addditional" -> "additional".
    argp.add_argument(
        '-d',
        action='store_true',
        help='Use creation date and time as an additional uniquifier element.'
    )
    argp.add_argument('-a',
                      '--annotation',
                      action='append',
                      default=[],
                      help='metadata.annotation(s), in the form key=value.',
                      dest='annotations')
    argp.add_argument('-r',
                      '--regex',
                      default='.*',
                      type=str,
                      help='Regex to select scenarios to run.')
    argp.add_argument(
        '--category',
        choices=['all', 'inproc', 'scalable', 'smoketest', 'sweep'],
        default='all',
        help='Select a category of tests to run.')
    argp.add_argument(
        '--allow_client_language',
        action='append',
        choices=language_choices,
        default=[],
        help='Allow cross-language scenarios with this client language.',
        dest='allow_client_languages')
    argp.add_argument(
        '--allow_server_language',
        action='append',
        choices=language_choices,
        default=[],
        help='Allow cross-language scenarios with this server language.',
        dest='allow_server_languages')
    argp.add_argument('--runs_per_test',
                      default=1,
                      type=int,
                      help='Number of copies to generate for each test.')
    argp.add_argument('-o',
                      '--output',
                      type=str,
                      help='Output file name. Output to stdout if not set.')
    args = argp.parse_args()

    # A run count of zero or less would silently generate no configs;
    # reject it with a usage error.
    if args.runs_per_test < 1:
        argp.error('runs_per_test must be greater than zero.')

    substitutions = parse_key_value_args(args.substitutions)

    uniquifier_elements = args.uniquifier_elements
    if args.d:
        # -d appends the creation timestamp as an extra uniquifier element.
        uniquifier_elements.append(now_string())

    annotations = parse_key_value_args(args.annotations)

    with open(args.template) as f:
        base_config = yaml.safe_load(
            string.Template(f.read()).substitute(substitutions))

    # Pull clients and servers out of the template spec: they serve as
    # per-scenario templates for gen_loadtest_configs, not literal entries.
    spec = base_config['spec']
    base_config_clients = spec['clients']
    del spec['clients']
    base_config_servers = spec['servers']
    del spec['servers']

    # The empty string stands for "no cross-language override" on each side.
    client_languages = [''] + args.allow_client_languages
    server_languages = [''] + args.allow_server_languages
    config_generators = []
    for language, client_language, server_language in itertools.product(
            args.languages, client_languages, server_languages):
        language_config = scenario_config_exporter.LanguageConfig(
            category=args.category,
            language=language,
            client_language=client_language,
            server_language=server_language)
        config_generators.append(
            gen_loadtest_configs(base_config,
                                 base_config_clients,
                                 base_config_servers,
                                 args.regex,
                                 language_config,
                                 loadtest_name_prefix=args.prefix,
                                 uniquifier_elements=uniquifier_elements,
                                 annotations=annotations,
                                 runs_per_test=args.runs_per_test))
    # chain() is already a lazy iterator; no genexp wrapper needed.
    configs = itertools.chain(*config_generators)

    def _dump(stream) -> None:
        # Write all generated configs as a multi-document YAML stream.
        yaml.dump_all(configs,
                      stream=stream,
                      Dumper=config_dumper(
                          CONFIGURATION_FILE_HEADER_COMMENT.strip()))

    # BUG FIX: the original `with open(...) if args.output else sys.stdout`
    # closed sys.stdout on exit when no output file was given. Only the
    # user-supplied file is managed (and closed) here; stdout is left open.
    if args.output:
        with open(args.output, 'w') as f:
            _dump(f)
    else:
        _dump(sys.stdout)