Пример #1
0
def read_user_config(flags):
    """Read the user config from disk and return it.

    Args:
        flags (argparse.Namespace): The flags from sys.argv. Only
            ``flags.user_config`` (path to the YAML config file) is read.

    Returns:
        dict: The user config. Exits with status 64 if no configuration
        is found.
    """
    # Load the user configuration if it exists and save a dictionary.
    user_config = {}
    user_config_file = os.path.realpath(os.path.expanduser(flags.user_config))
    if os.path.isfile(user_config_file):
        with io.open(user_config_file) as ucf:
            # safe_load refuses arbitrary Python-object tags; the config is
            # plain scalars/maps, so this is a drop-in hardening of the old
            # yaml.load(..., Loader=yaml.Loader) call.
            user_config = yaml.safe_load(ucf.read()) or {}

    # Sanity check: Is there a configuration? If not, abort.
    if not user_config:
        setup_logging(INFO)
        logger.critical('No user configuration found.')
        # logger.warn is deprecated; warning() is the supported spelling.
        logger.warning('This is probably your first time running Artman.')
        logger.warning('Run `configure-artman` to get yourself set up.')
        sys.exit(64)

    # Done; return the user config.
    return user_config
Пример #2
0
def read_user_config(flags):
    """Read the user config from disk and return it.

    Args:
        flags (argparse.Namespace): The flags from sys.argv. Only
            ``flags.user_config`` (path to the YAML config file) is read.

    Returns:
        dict: The user config. Exits with status 64 if no configuration
        is found.
    """
    # Load the user configuration if it exists and save a dictionary.
    user_config = {}
    user_config_file = os.path.realpath(os.path.expanduser(flags.user_config))
    if os.path.isfile(user_config_file):
        with io.open(user_config_file) as ucf:
            # safe_load refuses arbitrary Python-object tags; the config is
            # plain scalars/maps, so this is a drop-in hardening of the old
            # yaml.load(..., Loader=yaml.Loader) call.
            user_config = yaml.safe_load(ucf.read()) or {}

    # Sanity check: Is there a configuration? If not, abort.
    if not user_config:
        setup_logging(INFO)
        logger.critical('No user configuration found.')
        # logger.warn is deprecated; warning() is the supported spelling.
        logger.warning('This is probably your first time running Artman.')
        logger.warning('Run `configure-artman` to get yourself set up.')
        sys.exit(64)

    # Done; return the user config.
    return user_config
Пример #3
0
def configure(log_level=logging.INFO):
    """Walk the user through writing ~/.artman/config.yaml.

    Args:
        log_level (int): The logging level to use while configuring.
            Defaults to ``logging.INFO``.

    Returns:
        None: The configuration file is written to disk as a side effect.
    """
    user_config = UserConfig()

    # Walk the user through basic configuration.
    setup_logging(log_level)
    logger.info('Welcome to artman. We will get you configured.')
    logger.info(
        'When this is done, config will be stored in ~/.artman/config.yaml.')
    logger.info('')

    # Go through each step.
    # These are split out to make testing them easier.
    user_config.local.CopyFrom(_configure_local_config())

    # Compute the directory outside the try so it is always bound, even if
    # makedirs fails; only the directory creation itself is best-effort.
    config_dir = os.path.expanduser('~/.artman/')
    try:
        os.makedirs(config_dir)
    except OSError:
        # Most likely the directory already exists. A genuine permission
        # problem will surface when the file is written below.
        pass
    _write_pb_to_yaml(user_config, os.path.join(config_dir, 'config.yaml'))
    logger.success('Configuration written successfully to '
                   '~/.artman/config.yaml.')
Пример #4
0
def configure(log_level=logging.INFO):
    """Walk the user through writing ~/.artman/config.yaml.

    Args:
        log_level (int): The logging level to use while configuring.
            Defaults to ``logging.INFO``.

    Returns:
        None: The configuration file is written to disk as a side effect.
    """
    user_config = UserConfig()

    # Walk the user through basic configuration.
    setup_logging(log_level)
    logger.info('Welcome to artman. We will get you configured.')
    logger.info('When this is done, config will be stored in ~/.artman/config.yaml.')
    logger.info('')

    # Go through each step.
    # These are split out to make testing them easier.
    user_config.local.CopyFrom(_configure_local_config())

    # Compute the directory outside the try so it is always bound, even if
    # makedirs fails; only the directory creation itself is best-effort.
    config_dir = os.path.expanduser('~/.artman/')
    try:
        os.makedirs(config_dir)
    except OSError:
        # Most likely the directory already exists. A genuine permission
        # problem will surface when the file is written below.
        pass
    _write_pb_to_yaml(user_config, os.path.join(config_dir, 'config.yaml'))
    logger.success('Configuration written successfully to '
                   '~/.artman/config.yaml.')
Пример #5
0
def _parse_args(*args):
    """Parse command-line arguments, requiring the --queue-name flag.

    Exits with status 1 when --queue-name is missing.
    """
    arg_parser = _CreateArgumentParser()
    parsed = arg_parser.parse_args(args=args)
    if parsed.queue_name:
        return parsed
    # Guard clause: a queue name is mandatory — report and bail out.
    setup_logging(INFO)
    logger.critical('Required --queue-name flag not specified')
    sys.exit(1)
Пример #6
0
def _parse_args(*args):
    """Parse command-line arguments, requiring the --queue-name flag.

    Exits with status 1 when --queue-name is missing.
    """
    arg_parser = _CreateArgumentParser()
    parsed = arg_parser.parse_args(args=args)
    if parsed.queue_name:
        return parsed
    # Guard clause: a queue name is mandatory — report and bail out.
    setup_logging(INFO)
    logger.critical('Required --queue-name flag not specified')
    sys.exit(1)
Пример #7
0
def configure(log_level=logging.INFO):
    """Walk the user through writing ~/.artman/config.yaml.

    Args:
        log_level (int): The logging level to use while configuring.
            Defaults to ``logging.INFO``.

    Returns:
        None: The configuration file is written to disk as a side effect.
    """
    user_config = {}

    # Walk the user through basic configuration.
    setup_logging(log_level)
    logger.info('Welcome to artman. We will get you configured.')
    logger.info('When this is done, config will be stored in ~/.artman/config.yaml.')
    logger.info('')

    # Go through each step.
    # These are split out to make testing them easier.
    user_config['local_paths'] = _configure_local_paths(
        user_config.get('local_paths', {}),
    )
    user_config['publish'] = _configure_publish()
    if user_config['publish'] == 'github':
        user_config['github'] = _configure_github(
            user_config.get('github', {}),
        )

    # Serialize the final configuration to YAML.
    config_yaml = yaml.dump(user_config,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    # Some yaml backends return bytes; normalize to text for io.open below.
    if isinstance(config_yaml, six.binary_type):
        config_yaml = config_yaml.decode('utf8')

    # Expand the config directory once and reuse it for both the mkdir and
    # the file write (the original expanded '~/.artman' twice).
    config_dir = os.path.expanduser('~/.artman/')
    try:
        os.makedirs(config_dir)
    except OSError:
        # Most likely the directory already exists. A genuine permission
        # problem will surface when the file is written below.
        pass
    with io.open(os.path.join(config_dir, 'config.yaml'), 'w+') as file_:
        file_.write(u'---\n')
        file_.write(config_yaml)
    logger.success('Configuration written successfully to '
                   '~/.artman/config.yaml.')
Пример #8
0
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config: The user configuration taken from
            ~/.artman/config.yaml. It is read via attribute access
            (``user_config.local.toolkit``), so presumably a UserConfig
            message rather than a plain dict — TODO confirm.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    # Resolve root_dir/config to absolute paths. With --root-dir, the
    # config path is interpreted relative to it; otherwise the current
    # working directory becomes the root and config stands on its own.
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    pipeline_args['toolkit_path'] = user_config.local.toolkit
    pipeline_args['generator_args'] = flags.generator_args

    # Abort with exit code 96 when the artman config file is missing.
    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.aspect)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # Convert the artifact config into the legacy dict shape that
    # config_util.load_config_spec still consumes.
    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))

    language = Artifact.Language.Name(
        artifact_config.language).lower()

    # Set the pipeline
    # Dispatch artifact type -> pipeline name. Most pipelines also need the
    # target language; the Disco* ones additionally carry the discovery doc.
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    pipeline_args['aspect'] = Artifact.Aspect.Name(artifact_config.aspect)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
        if os.path.abspath(flags.output_dir) != os.path.abspath(DEFAULT_OUTPUT_DIR):
            logger.warning("`output_dir` is ignored in DiscoGapicConfigGen. "
             + "Yamls are saved at the path specified by `gapic_yaml`.")
        # Config generation writes to a scratch dir; the real output lands
        # at the `gapic_yaml` path instead (see the warning above).
        pipeline_args['output_dir'] = tempfile.mkdtemp()
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    config_args = config_util.load_config_spec(legacy_config_dict, language)
    # Merge order matters: pipeline_args (flag-derived) overwrites values
    # from the config file, then the merged dict becomes pipeline_args.
    config_args.update(pipeline_args)
    pipeline_args = config_args
    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2, )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        # Redact anything that looks like a credential before logging.
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    return pipeline_name, pipeline_args
Пример #9
0
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config: The user configuration taken from
            ~/.artman/config.yaml. It is read via attribute access
            (``user_config.local.toolkit``, ``user_config.github``), so
            presumably a UserConfig message rather than a plain dict —
            TODO confirm.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    # Resolve root_dir/config to absolute paths. With --root-dir, the
    # config path is interpreted relative to it; otherwise the current
    # working directory becomes the root and config stands on its own.
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    pipeline_args['toolkit'] = user_config.local.toolkit

    # --local-repo-dir is a dry-run-only feature: refuse it otherwise.
    if flags.subcommand == 'publish' and flags.local_repo_dir:
        if not flags.dry_run:
            logger.error('`--dry-run` flag must be passed when '
                         '`--local-repo-dir` is specified')
            sys.exit(96)
        flags.local_repo_dir = os.path.abspath(flags.local_repo_dir)
        pipeline_args['local_repo_dir'] = flags.local_repo_dir

    # Abort with exit code 96 when the artman config file is missing.
    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    shared_config_name = 'common.yaml'
    if artifact_config.language in (Artifact.RUBY, Artifact.NODEJS,):
        shared_config_name = 'doc.yaml'

    # Convert the artifact config into the legacy dict shape and persist it
    # to a temp YAML next to the config; it is removed at the end.
    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))
    tmp_legacy_config_yaml = '%s.tmp' % artman_config_path
    with io.open(tmp_legacy_config_yaml, 'w') as outfile:
        yaml.dump(legacy_config_dict, outfile, default_flow_style=False)

    # Comma-separated spec list: the temp legacy config plus the shared
    # per-language config under the googleapis root.
    config = ','.join([
        '{artman_config_path}',
        '{googleapis}/gapic/lang/{shared_config_name}',
    ]).format(
        artman_config_path=tmp_legacy_config_yaml,
        googleapis=root_dir,
        shared_config_name=shared_config_name,
    )

    language = Artifact.Language.Name(
        artifact_config.language).lower()

    # Set the pipeline
    # Dispatch artifact type -> pipeline name. Most pipelines also need the
    # target language; the Disco* ones additionally carry the discovery doc.
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    # Note: the var replacement is still needed because they are still being
    # used in some shared/common config yamls.
    config_sections = ['common']
    for config_spec in config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={
                'GOOGLEAPIS': root_dir,
                'DISCOVERY_ARTIFACT_MANAGER': root_dir,
                'TOOLKIT': user_config.local.toolkit
            },
            language=language, )
        # Later specs overwrite earlier ones and flag-derived values.
        pipeline_args.update(config_args)

    # Setup publishing related config if needed.
    # `generate` never publishes; `publish` resolves the target and, for
    # GitHub, either stays local (dry run) or attaches credentials.
    if flags.subcommand == 'generate':
        pipeline_args['publish'] = 'noop'
    elif flags.subcommand == 'publish':
        publishing_config = _get_publishing_config(artifact_config,
                                                   flags.target)
        if publishing_config.type == Artifact.PublishTarget.GITHUB:
            if flags.dry_run:
                pipeline_args['publish'] = 'local'
            else:
                pipeline_args['publish'] = 'github'
                pipeline_args['github'] = support.parse_github_credentials(
                    argv_flags=flags,
                    github_config=user_config.github)
            # Collapse the configured repo list down to the one selected
            # publish target.
            repos = pipeline_args.pop('git_repos')
            pipeline_args['git_repo'] = support.select_git_repo(
                repos, publishing_config.name)
        else:
            logger.error(
                'Publishing type `%s` is not supported yet.' %
                Artifact.PublishTarget.Type.Name(publishing_config.type))
            sys.exit(96)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2, )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        # Redact anything that looks like a credential before logging.
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Clean up the tmp legacy artman config.
    os.remove(tmp_legacy_config_yaml)

    # Return the final arguments.
    return pipeline_name, pipeline_args
Пример #10
0
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from ~/.artman/config.yaml.

    Returns:
        tuple (str, dict, str): 3-tuple containing:
            - pipeline name
            - pipeline arguments
            - 'remote' or None
    """
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    # Flag value wins over user config; INFO is the fallback.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(user_config,
                                                             flags.googleapis)

    # In most cases, we get a language.
    # Only GapicConfigPipeline is language-agnostic.
    if flags.language:
        pipeline_args['language'] = flags.language
    elif flags.pipeline_name != 'GapicConfigPipeline':
        logger.critical('--language is required for every pipeline except '
                        'GapicConfigPipeline.')
        sys.exit(64)

    # If this is remote execution, configure that.
    if flags.remote:
        pipeline_id = str(uuid.uuid4())
        # Use a unique random temp directory for remote execution.
        # TODO(ethanbao): Let remote artman decide the temp directory.
        pipeline_args['local_paths']['reporoot'] = '/tmp/artman/{id}'.format(
            id=pipeline_id,
        )
        pipeline_args['pipeline_id'] = pipeline_id

    # Specify the default pipeline settings - this may change if
    # BATCH is set
    default_pipeline = 'GapicClientPipeline'

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    if flags.api:
        shared_config_name = 'common.yaml'
        if flags.language == 'ruby':
            shared_config_name = 'doc.yaml'

        googleapis = os.path.realpath(os.path.expanduser(
            pipeline_args['local_paths']['googleapis'],
        ))
        flags.config = ','.join([
            '{googleapis}/gapic/api/artman_{api}.yaml',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            api=flags.api,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )
    elif flags.batch:
        googleapis = os.path.realpath(os.path.expanduser(
            pipeline_args['local_paths']['googleapis'],
        ))
        flags.config = '{googleapis}/gapic/batch/common.yaml'.format(
            googleapis=googleapis,
        )
        default_pipeline = 'GapicClientBatchPipeline'
        if not flags.publish:
            # If publish flag was not set by the user, set it here.
            # This prevents the user config yaml from causing a
            # publish event when batch mode is used.
            flags.publish = 'noop'
        if flags.target:
            logger.critical('--target and --batch cannot both be specified; '
                            'when using --batch, the repo must be the default '
                            'specified in the artman config yaml file (or '
                            'staging if no default is provided).')
            sys.exit(64)

    # Set the pipeline if none was specified
    if not flags.pipeline_name:
        flags.pipeline_name = default_pipeline

    # Determine where to publish.
    pipeline_args['publish'] = support.resolve('publish', user_config, flags,
        default='local',
    )

    # Parse out the GitHub credentials iff we are publishing to GitHub.
    if pipeline_args['publish'] == 'github':
        pipeline_args['github'] = support.parse_github_credentials(
            argv_flags=flags,
            config=user_config.get('github', {}),
        )

    # Parse out the full configuration.
    # Later specs overwrite earlier ones and flag-derived values.
    config_sections = ['common']
    for config_spec in flags.config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={k.upper(): v for k, v in
                       pipeline_args['local_paths'].items()},
            language=flags.language,
        )
        pipeline_args.update(config_args)

    # Add any arbitrary keyword arguments.
    # NOTE(review): logger.warn is deprecated; prefer logger.warning.
    if flags.pipeline_kwargs != '{}':
        logger.warn('The use of --pipeline-kwargs is discouraged.')
        cmd_args = ast.literal_eval(flags.pipeline_kwargs)
        pipeline_args.update(cmd_args)

    # Coerce `git_repos` and `target_repo` into a single git_repo.
    if pipeline_args['publish'] in ('github', 'local') and not flags.batch:
        # Temporarily give our users a nice error if they have an older
        # googleapis checkout.
        # DEPRECATED: 2017-04-20
        # REMOVE: 2017-05-20
        if 'git_repo' in pipeline_args:
            logger.error('Your git repos are configured in your artman YAML '
                         'using a older format. Please git pull.')
            sys.exit(96)

        # Pop the git repos off of the pipeline args and select the
        # correct one.
        repos = pipeline_args.pop('git_repos')
        pipeline_args['git_repo'] = support.select_git_repo(repos, flags.target)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        # Redact anything that looks like a credential before logging.
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    # This includes a pipeline to run, arguments, and whether to run remotely.
    return (
        flags.pipeline_name,
        pipeline_args,
        'remote' if flags.remote else None,
    )
Пример #11
0
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config: The user configuration taken from
            ~/.artman/config.yaml. It is read via attribute access
            (``user_config.local.toolkit``, ``user_config.github``), so
            presumably a UserConfig message rather than a plain dict —
            TODO confirm.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    # Resolve root_dir/config to absolute paths. With --root-dir, the
    # config path is interpreted relative to it; otherwise the current
    # working directory becomes the root and config stands on its own.
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    pipeline_args['toolkit_path'] = user_config.local.toolkit

    # --local-repo-dir is a dry-run-only feature: refuse it otherwise.
    if flags.subcommand == 'publish' and flags.local_repo_dir:
        if not flags.dry_run:
            logger.error('`--dry-run` flag must be passed when '
                         '`--local-repo-dir` is specified')
            sys.exit(96)
        flags.local_repo_dir = os.path.abspath(flags.local_repo_dir)
        pipeline_args['local_repo_dir'] = flags.local_repo_dir

    # Abort with exit code 96 when the artman config file is missing.
    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.aspect)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # Convert the artifact config into the legacy dict shape that
    # config_util.load_config_spec still consumes.
    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))

    language = Artifact.Language.Name(
        artifact_config.language).lower()

    # Set the pipeline
    # Dispatch artifact type -> pipeline name. Most pipelines also need the
    # target language; the Disco* ones additionally carry the discovery doc.
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    pipeline_args['aspect'] = Artifact.Aspect.Name(artifact_config.aspect)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    # Note the merge order: config-file values overwrite flag-derived ones.
    config_args = config_util.load_config_spec(legacy_config_dict, language)
    pipeline_args.update(config_args)

    # Setup publishing related config if needed.
    # `generate` never publishes; `publish` resolves the target and, for
    # GitHub, either stays local (dry run) or attaches credentials.
    if flags.subcommand == 'generate':
        pipeline_args['publish'] = 'noop'
    elif flags.subcommand == 'publish':
        publishing_config = _get_publishing_config(artifact_config,
                                                   flags.target)
        if publishing_config.type == Artifact.PublishTarget.GITHUB:
            if flags.dry_run:
                pipeline_args['publish'] = 'local'
            else:
                pipeline_args['publish'] = 'github'
                pipeline_args['github'] = support.parse_github_credentials(
                    argv_flags=flags,
                    github_config=user_config.github)
            # Collapse the configured repo list down to the one selected
            # publish target.
            repos = pipeline_args.pop('git_repos')
            pipeline_args['git_repo'] = support.select_git_repo(
                repos, publishing_config.name)
        else:
            logger.error(
                'Publishing type `%s` is not supported yet.' %
                Artifact.PublishTarget.Type.Name(publishing_config.type))
            sys.exit(96)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2, )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        # Redact anything that looks like a credential before logging.
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    return pipeline_name, pipeline_args
Пример #12
0
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config: The user configuration taken from
            ~/.artman/config.yaml. It is read via attribute access
            (``user_config.local.toolkit``, ``user_config.github``), so
            presumably a UserConfig message rather than a plain dict —
            TODO confirm.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    # Resolve root_dir/config to absolute paths. With --root-dir, the
    # config path is interpreted relative to it; otherwise the current
    # working directory becomes the root and config stands on its own.
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    # TODO two args reference the same concept - clean this up
    pipeline_args['toolkit'] = user_config.local.toolkit
    pipeline_args['toolkit_path'] = user_config.local.toolkit

    # --local-repo-dir is a dry-run-only feature: refuse it otherwise.
    if flags.subcommand == 'publish' and flags.local_repo_dir:
        if not flags.dry_run:
            logger.error('`--dry-run` flag must be passed when '
                         '`--local-repo-dir` is specified')
            sys.exit(96)
        flags.local_repo_dir = os.path.abspath(flags.local_repo_dir)
        pipeline_args['local_repo_dir'] = flags.local_repo_dir

    # Abort with exit code 96 when the artman config file is missing.
    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error('Artman config file `%s` doesn\'t exist.' %
                     artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(artman_config_path,
                                                      flags.artifact_name)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # Convert the artifact config into the legacy dict shape that
    # config_util.load_config_spec still consumes.
    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))

    language = Artifact.Language.Name(artifact_config.language).lower()

    # Set the pipeline
    # Dispatch artifact type -> pipeline name. Most pipelines also need the
    # target language; the Disco* ones additionally carry the discovery doc.
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    # Note the merge order: config-file values overwrite flag-derived ones.
    config_args = config_util.load_config_spec(legacy_config_dict, language)
    pipeline_args.update(config_args)

    # Setup publishing related config if needed.
    # `generate` never publishes; `publish` resolves the target and, for
    # GitHub, either stays local (dry run) or attaches credentials.
    if flags.subcommand == 'generate':
        pipeline_args['publish'] = 'noop'
    elif flags.subcommand == 'publish':
        publishing_config = _get_publishing_config(artifact_config,
                                                   flags.target)
        if publishing_config.type == Artifact.PublishTarget.GITHUB:
            if flags.dry_run:
                pipeline_args['publish'] = 'local'
            else:
                pipeline_args['publish'] = 'github'
                pipeline_args['github'] = support.parse_github_credentials(
                    argv_flags=flags, github_config=user_config.github)
            # Collapse the configured repo list down to the one selected
            # publish target.
            repos = pipeline_args.pop('git_repos')
            pipeline_args['git_repo'] = support.select_git_repo(
                repos, publishing_config.name)
        else:
            logger.error(
                'Publishing type `%s` is not supported yet.' %
                Artifact.PublishTarget.Type.Name(publishing_config.type))
            sys.exit(96)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        # Redact anything that looks like a credential before logging.
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    return pipeline_name, pipeline_args
Пример #13
0
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict, str): 3-tuple containing:
            - pipeline name
            - pipeline arguments
            - 'remote' or None
    """
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(
        user_config, flags.googleapis)

    # In most cases, we get a language.
    if flags.language:
        pipeline_args['language'] = flags.language
    elif flags.pipeline_name != 'GapicConfigPipeline':
        logger.critical('--language is required for every pipeline except '
                        'GapicConfigPipeline.')
        sys.exit(64)

    # If this is remote execution, configure that.
    if flags.remote:
        pipeline_id = str(uuid.uuid4())
        # Use a unique random temp directory for remote execution.
        # TODO(ethanbao): Let remote artman decide the temp directory.
        pipeline_args['local_paths']['reporoot'] = '/tmp/artman/{id}'.format(
            id=pipeline_id)
        pipeline_args['pipeline_id'] = pipeline_id

    # Specify the default pipeline settings - this may change if
    # BATCH is set.
    default_pipeline = 'GapicClientPipeline'

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    if flags.api:
        shared_config_name = 'common.yaml'
        if flags.language in ('ruby', 'nodejs'):
            shared_config_name = 'doc.yaml'

        googleapis = os.path.realpath(
            os.path.expanduser(pipeline_args['local_paths']['googleapis']))
        flags.config = ','.join([
            '{googleapis}/gapic/api/artman_{api}.yaml',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            api=flags.api,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )
    elif flags.batch:
        googleapis = os.path.realpath(
            os.path.expanduser(pipeline_args['local_paths']['googleapis']))
        flags.config = '{googleapis}/gapic/batch/common.yaml'.format(
            googleapis=googleapis)
        default_pipeline = 'GapicClientBatchPipeline'
        if not flags.publish:
            # If publish flag was not set by the user, set it here.
            # This prevents the user config yaml from causing a
            # publish event when batch mode is used.
            flags.publish = 'noop'
        if flags.target:
            logger.critical('--target and --batch cannot both be specified; '
                            'when using --batch, the repo must be the default '
                            'specified in the artman config yaml file (or '
                            'staging if no default is provided).')
            sys.exit(64)

    # Set the pipeline if none was specified.
    if not flags.pipeline_name:
        flags.pipeline_name = default_pipeline

    # Determine where to publish.
    pipeline_args['publish'] = support.resolve(
        'publish',
        user_config,
        flags,
        default='local',
    )

    # Parse out the GitHub credentials iff we are publishing to GitHub.
    if pipeline_args['publish'] == 'github':
        pipeline_args['github'] = support.parse_github_credentials(
            argv_flags=flags,
            config=user_config.get('github', {}),
        )

    # Parse out the full configuration.
    config_sections = ['common']
    for config_spec in flags.config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={
                k.upper(): v
                for k, v in pipeline_args['local_paths'].items()
            },
            language=flags.language,
        )
        pipeline_args.update(config_args)

    # Add any arbitrary keyword arguments.
    if flags.pipeline_kwargs != '{}':
        # Logger.warn is deprecated in the logging module; use warning().
        logger.warning('The use of --pipeline-kwargs is discouraged.')
        cmd_args = ast.literal_eval(flags.pipeline_kwargs)
        pipeline_args.update(cmd_args)

    # Coerce `git_repos` and `target_repo` into a single git_repo.
    if pipeline_args['publish'] in ('github', 'local') and not flags.batch:
        # Temporarily give our users a nice error if they have an older
        # googleapis checkout.
        # DEPRECATED: 2017-04-20
        # REMOVE: 2017-05-20
        if 'git_repo' in pipeline_args:
            logger.error('Your git repos are configured in your artman YAML '
                         'using a older format. Please git pull.')
            sys.exit(96)

        # Pop the git repos off of the pipeline args and select the
        # correct one.
        repos = pipeline_args.pop('git_repos')
        pipeline_args['git_repo'] = support.select_git_repo(
            repos, flags.target)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        # Redact anything that looks like a credential. Guard against a
        # line that mentions "token" but has no colon (e.g. a YAML list
        # item), which previously raised ValueError from str.index.
        if 'token' in line and ':' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    # This includes a pipeline to run, arguments, and whether to run remotely.
    return (
        flags.pipeline_name,
        pipeline_args,
        'remote' if flags.remote else None,
    )
Пример #14
0
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
                            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    flags.input_dir = os.path.abspath(flags.input_dir)
    flags.output_dir = os.path.abspath(flags.output_dir)
    flags.config = os.path.abspath(flags.config)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(
        user_config, flags.input_dir)

    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.input_dir)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # Ruby and Node.js use a doc-only shared config; everything else the
    # common one.
    shared_config_name = 'common.yaml'
    if artifact_config.language in (Artifact.RUBY, Artifact.NODEJS):
        shared_config_name = 'doc.yaml'

    # Convert the artman config into the legacy dict format and write it
    # to a temporary file next to the original config.
    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, flags.input_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))
    tmp_legacy_config_yaml = '%s.tmp' % artman_config_path
    with io.open(tmp_legacy_config_yaml, 'w') as outfile:
        yaml.dump(legacy_config_dict, outfile, default_flow_style=False)

    # Everything below may raise or call sys.exit(); use try/finally so the
    # temporary legacy config is removed on every path. (Previously it was
    # leaked whenever an error path was taken before the final cleanup.)
    try:
        googleapis = os.path.realpath(
            os.path.expanduser(
                pipeline_args['local_paths']['googleapis']))
        config = ','.join([
            '{artman_config_path}',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            artman_config_path=tmp_legacy_config_yaml,
            googleapis=googleapis,
            shared_config_name=shared_config_name)

        language = Artifact.Language.Name(
            artifact_config.language).lower()

        # Set the pipeline as well as package_type and packaging.
        artifact_type = artifact_config.type
        if artifact_type in (Artifact.GAPIC, Artifact.GAPIC_ONLY):
            pipeline_name = 'GapicClientPipeline'
            pipeline_args['language'] = language
        elif artifact_type in (Artifact.GRPC, Artifact.GRPC_COMMON):
            pipeline_name = 'GrpcClientPipeline'
            pipeline_args['language'] = language
        elif artifact_type == Artifact.GAPIC_CONFIG:
            pipeline_name = 'GapicConfigPipeline'
        else:
            raise ValueError('Unrecognized artifact.')

        # Parse out the full configuration.
        # Note: the var replacement is still needed because they are still
        # being used in some shared/common config yamls.
        config_sections = ['common']
        for config_spec in config.split(','):
            config_args = config_util.load_config_spec(
                config_spec=config_spec,
                config_sections=config_sections,
                repl_vars={
                    k.upper(): v
                    for k, v in pipeline_args['local_paths'].items()
                },
                language=language)
            pipeline_args.update(config_args)

        # Setup publishing related config if needed.
        if flags.subcommand == 'generate':
            pipeline_args['publish'] = 'noop'
        elif flags.subcommand == 'publish':
            publishing_config = _get_publishing_config(artifact_config,
                                                       flags.target)
            if publishing_config.type == Artifact.PublishTarget.GITHUB:
                pipeline_args['publish'] = (
                    'local' if flags.dry_run else 'github')
                pipeline_args['github'] = support.parse_github_credentials(
                    argv_flags=flags,
                    config=user_config.get('github', {}))
                repos = pipeline_args.pop('git_repos')
                pipeline_args['git_repo'] = support.select_git_repo(
                    repos, publishing_config.name)
            else:
                logger.error(
                    'Publishing type `%s` is not supported yet.' %
                    Artifact.PublishTarget.Type.Name(publishing_config.type))
                sys.exit(96)

        # Print out the final arguments to stdout, to help the user with
        # possible debugging.
        pipeline_args_repr = yaml.dump(
            pipeline_args,
            block_seq_indent=2,
            default_flow_style=False,
            indent=2)
        logger.info('Final args:')
        for line in pipeline_args_repr.split('\n'):
            # Redact anything that looks like a credential; guard against
            # a "token" line with no colon, which would raise ValueError.
            if 'token' in line and ':' in line:
                index = line.index(':')
                line = line[:index + 2] + '<< REDACTED >>'
            logger.info('  {0}'.format(line))

        # Return the final arguments.
        return pipeline_name, pipeline_args
    finally:
        # Clean up the tmp legacy artman config on success and failure alike
        # (sys.exit raises SystemExit, so this still runs).
        os.remove(tmp_legacy_config_yaml)
Пример #15
0
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
                            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    if flags.input_dir:
        flags.input_dir = os.path.abspath(flags.input_dir)
    flags.output_dir = os.path.abspath(flags.output_dir)
    flags.config = os.path.abspath(flags.config)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(
        user_config, flags.input_dir)

    # Save the input directory back to flags if it was not explicitly set.
    if not flags.input_dir:
        flags.input_dir = pipeline_args['local_paths']['googleapis']

    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.input_dir)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # Ruby and Node.js use a doc-only shared config; everything else the
    # common one.
    shared_config_name = 'common.yaml'
    if artifact_config.language in (Artifact.RUBY, Artifact.NODEJS):
        shared_config_name = 'doc.yaml'

    # Convert the artman config into the legacy dict format and write it
    # to a temporary file next to the original config.
    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, flags.input_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))
    tmp_legacy_config_yaml = '%s.tmp' % artman_config_path
    with io.open(tmp_legacy_config_yaml, 'w') as outfile:
        yaml.dump(legacy_config_dict, outfile, default_flow_style=False)

    # Everything below may raise or call sys.exit(); use try/finally so the
    # temporary legacy config is removed on every path. (Previously it was
    # leaked whenever an error path was taken before the final cleanup.)
    try:
        googleapis = os.path.realpath(
            os.path.expanduser(
                pipeline_args['local_paths']['googleapis']))
        config = ','.join([
            '{artman_config_path}',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            artman_config_path=tmp_legacy_config_yaml,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )

        language = Artifact.Language.Name(
            artifact_config.language).lower()

        # Set the pipeline as well as package_type and packaging.
        artifact_type = artifact_config.type
        if artifact_type in (Artifact.GAPIC, Artifact.GAPIC_ONLY):
            pipeline_name = 'GapicClientPipeline'
            pipeline_args['language'] = language
        elif artifact_type in (Artifact.GRPC, Artifact.GRPC_COMMON):
            pipeline_name = 'GrpcClientPipeline'
            pipeline_args['language'] = language
        elif artifact_type == Artifact.GAPIC_CONFIG:
            pipeline_name = 'GapicConfigPipeline'
        else:
            raise ValueError('Unrecognized artifact.')

        # Parse out the full configuration.
        # Note: the var replacement is still needed because they are still
        # being used in some shared/common config yamls.
        config_sections = ['common']
        for config_spec in config.split(','):
            config_args = config_util.load_config_spec(
                config_spec=config_spec,
                config_sections=config_sections,
                repl_vars={
                    k.upper(): v
                    for k, v in pipeline_args['local_paths'].items()
                },
                language=language)
            pipeline_args.update(config_args)

        # Setup publishing related config if needed.
        if flags.subcommand == 'generate':
            pipeline_args['publish'] = 'noop'
        elif flags.subcommand == 'publish':
            publishing_config = _get_publishing_config(artifact_config,
                                                       flags.target)
            if publishing_config.type == Artifact.PublishTarget.GITHUB:
                pipeline_args['publish'] = (
                    'local' if flags.dry_run else 'github')
                pipeline_args['github'] = support.parse_github_credentials(
                    argv_flags=flags,
                    config=user_config.get('github', {}))
                repos = pipeline_args.pop('git_repos')
                pipeline_args['git_repo'] = support.select_git_repo(
                    repos, publishing_config.name)
            else:
                logger.error(
                    'Publishing type `%s` is not supported yet.' %
                    Artifact.PublishTarget.Type.Name(publishing_config.type))
                sys.exit(96)

        # Print out the final arguments to stdout, to help the user with
        # possible debugging.
        pipeline_args_repr = yaml.dump(
            pipeline_args,
            block_seq_indent=2,
            default_flow_style=False,
            indent=2)
        logger.info('Final args:')
        for line in pipeline_args_repr.split('\n'):
            # Redact anything that looks like a credential; guard against
            # a "token" line with no colon, which would raise ValueError.
            if 'token' in line and ':' in line:
                index = line.index(':')
                line = line[:index + 2] + '<< REDACTED >>'
            logger.info('  {0}'.format(line))

        # Return the final arguments.
        return pipeline_name, pipeline_args
    finally:
        # Clean up the tmp legacy artman config on success and failure alike
        # (sys.exit raises SystemExit, so this still runs).
        os.remove(tmp_legacy_config_yaml)