def _pull_and_execute_tasks(task_client, queue_name):
    pull_task_response = _pull_task(task_client, queue_name)
    tasks = pull_task_response.get('tasks', [])
    if not tasks:
        # Sleep for 10 seconds if no tasks are returned.
        logger.debug('There is no pending task. Sleep for 10 seconds.')
        time.sleep(10)
    for task in tasks:
        task_id, tmp_root, artman_user_config, log_file_path = _prepare_dir()
        log_file_handler = None
        try:
            log_file_handler = _setup_logger(log_file_path)
            logger.info('Starting to execute task %s' % task)
            if int(task['taskStatus']['attemptDispatchCount']) > MAX_ATTEMPTS:
                logger.info('Delete task which exceeds max attempts.')
                _delete_task(task_client, task)
                continue
            _execute_task(artman_user_config, task)
            _ack_task(task_client, task)
            logger.info('Task execution finished')
        except Exception:
            logger.error('\n'.join(traceback.format_tb(sys.exc_info()[2])))
            _cancel_task_lease(task_client, task)
        finally:
            logger.info('Cleanup tmp directory %s' % tmp_root)
            # Use task id as log name
            _write_to_cloud_logging(task_id, log_file_path)
            _cleanup(tmp_root, log_file_handler)
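Each call to _pull_and_execute_tasks above handles at most one batch of leased tasks and sleeps briefly when the queue is empty, so a worker has to invoke it in a loop. A minimal driver sketch; how `task_client` is constructed and what `queue_name` looks like are not shown in this listing, so both are assumptions:

def worker_loop(task_client, queue_name):
    # Poll indefinitely; _pull_and_execute_tasks already sleeps for 10
    # seconds when the queue is empty, so no extra backoff is needed here.
    while True:
        _pull_and_execute_tasks(task_client, queue_name)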
Example #2
File: main.py  Project: znutsd37/artman
def main(*args):
    """Main method of artman."""
    # If no arguments are sent, we are using the entry point; derive
    # them from sys.argv.
    if not args:
        args = sys.argv[1:]

    # Get to a normalized set of arguments.
    flags = parse_args(*args)
    user_config = loader.read_user_config(flags.user_config)
    _adjust_root_dir(flags.root_dir)
    pipeline_name, pipeline_kwargs = normalize_flags(flags, user_config)

    if flags.local:
        try:
            pipeline = pipeline_factory.make_pipeline(pipeline_name,
                                                      **pipeline_kwargs)
            # Hard-coded to run the pipeline with the serial engine,
            # though other engines could be used as well.
            engine = engines.load(
                pipeline.flow, engine='serial', store=pipeline.kwargs)
            engine.run()
        except Exception:
            logger.error(traceback.format_exc())
            sys.exit(32)
        finally:
            _change_owner(flags, pipeline_name, pipeline_kwargs)
    else:
        support.check_docker_requirements(flags.image)
        # Note: artman currently won't work if input directory doesn't contain
        # common-protos.
        logger.info('Running artman command in a Docker instance.')
        _run_artman_in_docker(flags)
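Since main(*args) falls back to sys.argv only when called with no arguments, it can also be driven programmatically, e.g. from a test. A hypothetical invocation; the config path and artifact name below are illustrative placeholders, not values from this listing:

# Equivalent to the command line:
#   artman --config artman_pubsub.yaml generate python_gapic
main('--config', 'artman_pubsub.yaml', 'generate', 'python_gapic')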
Example #3
def main(*args):
    """Main method of artman."""
    # If no arguments are sent, we are using the entry point; derive
    # them from sys.argv.
    if not args:
        args = sys.argv[1:]

    # Get to a normalized set of arguments.
    flags = parse_args(*args)
    user_config = loader.read_user_config(flags.user_config)
    _adjust_root_dir(flags.root_dir)
    pipeline_name, pipeline_kwargs = normalize_flags(flags, user_config)

    if flags.local:
        try:
            pipeline = pipeline_factory.make_pipeline(pipeline_name, False,
                                                      **pipeline_kwargs)
            # Hard-coded to run the pipeline with the serial engine,
            # though other engines could be used as well.
            engine = engines.load(
                pipeline.flow, engine='serial', store=pipeline.kwargs)
            engine.run()
        except Exception:
            logger.error(traceback.format_exc())
            sys.exit(32)
        finally:
            _change_owner(flags, pipeline_name, pipeline_kwargs)
    else:
        support.check_docker_requirements(flags.image)
        # Note: artman currently won't work if input directory doesn't contain
        # shared configuration files (e.g. gapic/packaging/dependencies.yaml).
        # This will make artman less useful for non-Google APIs.
        # TODO(ethanbao): Fix that by checking the input directory and
        # pulling the shared configuration files if necessary.
        logger.info('Running artman command in a Docker instance.')
        _run_artman_in_docker(flags)
Example #6
File: main.py  Project: znutsd37/artman
def _run_artman_in_docker(flags):
    """Executes artman command.

    Args:
        flags (argparse.Namespace): Parsed command-line flags.
            flags.root_dir is the input directory that will be mounted
            into the artman Docker container as the local googleapis
            directory.
    Returns:
        The output directory with artman-generated files.
    """
    ARTMAN_CONTAINER_NAME = 'artman-docker'
    root_dir = flags.root_dir
    output_dir = flags.output_dir
    artman_config_dirname = os.path.dirname(flags.config)
    docker_image = flags.image

    inner_artman_cmd_str = ' '.join(["'" + arg + "'" for arg in sys.argv[1:]])
    # Because artman now supports setting root dir in either command line or
    # user config, make sure `--root-dir` flag gets explicitly passed to the
    # artman command running inside Artman Docker container.
    if '--root-dir' not in inner_artman_cmd_str:
        inner_artman_cmd_str = '--root-dir %s %s' % (
            root_dir, inner_artman_cmd_str)

    # TODO(ethanbao): Such folder to folder mounting won't work on windows.
    base_cmd = [
        'docker', 'run', '--name', ARTMAN_CONTAINER_NAME, '--rm', '-i', '-t',
        '-e', 'HOST_USER_ID=%s' % os.getuid(),
        '-e', 'HOST_GROUP_ID=%s' % os.getgid(),
        '-e', '%s=True' % RUNNING_IN_ARTMAN_DOCKER_TOKEN,
        '-v', '%s:%s' % (root_dir, root_dir),
        '-v', '%s:%s' % (output_dir, output_dir),
        '-v', '%s:%s' % (artman_config_dirname, artman_config_dirname),
        '-w', root_dir
    ]
    base_cmd.extend([docker_image, '/bin/bash', '-c'])

    inner_artman_debug_cmd_str = inner_artman_cmd_str
    # Because debug_cmd is run inside the Docker image, we want to
    # make sure --local is set
    if '--local' not in inner_artman_debug_cmd_str:
        inner_artman_debug_cmd_str = '--local %s' % inner_artman_debug_cmd_str
    debug_cmd = list(base_cmd)
    debug_cmd.append('"artman %s; bash"' % inner_artman_debug_cmd_str)

    cmd = base_cmd
    cmd.append('artman --local %s' % (inner_artman_cmd_str))
    try:
        output = subprocess.check_output(cmd)
        logger.info(output.decode('utf8'))
        return output_dir
    except subprocess.CalledProcessError as e:
        logger.error(e.output.decode('utf8'))
        logger.error(
            'Artman execution failed. For additional logging, re-run the '
            'command with the "--verbose" flag')
        sys.exit(32)
    finally:
        logger.debug('For further inspection inside docker container, run `%s`'
                     % ' '.join(debug_cmd))
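For orientation, the assembled `cmd` for a run with root_dir=/googleapis and output_dir=/tmp/artman-output would look roughly like the sketch below. The user/group ids, image name, and the token variable's actual value are illustrative assumptions:

# ['docker', 'run', '--name', 'artman-docker', '--rm', '-i', '-t',
#  '-e', 'HOST_USER_ID=1000',
#  '-e', 'HOST_GROUP_ID=1000',
#  '-e', '<RUNNING_IN_ARTMAN_DOCKER_TOKEN>=True',
#  '-v', '/googleapis:/googleapis',
#  '-v', '/tmp/artman-output:/tmp/artman-output',
#  '-v', '/googleapis:/googleapis',  # dirname of the artman config file
#  '-w', '/googleapis',
#  '<docker_image>', '/bin/bash', '-c',
#  'artman --local --root-dir /googleapis <original argv>']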
Example #7
File: main.py  Project: googleapis/artman
def _run_artman_in_docker(flags):
    """Executes artman command.

    Args:
        flags (argparse.Namespace): Parsed command-line flags.
            flags.root_dir is the input directory that will be mounted
            into the artman Docker container as the local googleapis
            directory.
    Returns:
        The output directory with artman-generated files.
    """
    ARTMAN_CONTAINER_NAME = 'artman-docker'
    root_dir = flags.root_dir
    output_dir = flags.output_dir
    artman_config_dirname = os.path.dirname(flags.config)
    docker_image = flags.image

    inner_artman_cmd_str = ' '.join(sys.argv[1:])
    # Because artman now supports setting root dir in either command line or
    # user config, make sure `--root-dir` flag gets explicitly passed to the
    # artman command running inside Artman Docker container.
    if '--root-dir' not in inner_artman_cmd_str:
        inner_artman_cmd_str = '--root-dir %s %s' % (
            root_dir, inner_artman_cmd_str)

    # TODO(ethanbao): Such folder to folder mounting won't work on windows.
    base_cmd = [
        'docker', 'run', '--name', ARTMAN_CONTAINER_NAME, '--rm', '-i', '-t',
        '-e', 'HOST_USER_ID=%s' % os.getuid(),
        '-e', 'HOST_GROUP_ID=%s' % os.getgid(),
        '-e', '%s=True' % RUNNING_IN_ARTMAN_DOCKER_TOKEN,
        '-v', '%s:%s' % (root_dir, root_dir),
        '-v', '%s:%s' % (output_dir, output_dir),
        '-v', '%s:%s' % (artman_config_dirname, artman_config_dirname),
        '-w', root_dir
    ]
    base_cmd.extend([docker_image, '/bin/bash', '-c'])

    inner_artman_debug_cmd_str = inner_artman_cmd_str
    # Because debug_cmd is run inside the Docker image, we want to
    # make sure --local is set
    if '--local' not in inner_artman_debug_cmd_str:
        inner_artman_debug_cmd_str = '--local %s' % inner_artman_debug_cmd_str
    debug_cmd = list(base_cmd)
    debug_cmd.append('"artman %s; bash"' % inner_artman_debug_cmd_str)

    cmd = base_cmd
    cmd.append('artman --local %s' % (inner_artman_cmd_str))
    try:
        output = subprocess.check_output(cmd)
        logger.info(output.decode('utf8'))
        return output_dir
    except subprocess.CalledProcessError as e:
        logger.error(e.output.decode('utf8'))
        logger.error(
            'Artman execution failed. For additional logging, re-run the '
            'command with the "--verbose" flag')
        sys.exit(32)
    finally:
        logger.debug('For further inspection inside docker container, run `%s`'
                     % ' '.join(debug_cmd))
Example #8
def _get_publishing_config(artifact_config_pb, publish_target):
    valid_options = []
    for target in artifact_config_pb.publish_targets:
        valid_options.append(target.name)
        if target.name == publish_target:
            return target
    logger.error('No publish target with `%s` configured in artifact `%s`. '
                 'Valid options are %s' %
                 (publish_target, artifact_config_pb.name, valid_options))
    sys.exit(96)
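A sketch of how this lookup is used; `artifact_config_pb` is assumed to come from loader.load_artifact_config(...), as in the normalize_flags examples further down:

# Returns the matching PublishTarget message, or logs the valid target
# names and exits with status 96 when nothing matches.
publish_target = _get_publishing_config(artifact_config_pb, 'github')
print(publish_target.name)  # 'github'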
Example #9
def _run_artman_in_docker(flags):
    """Executes artman command.

    Args:
        flags (argparse.Namespace): Parsed command-line flags.
            flags.input_dir is the input directory that will be mounted
            into the artman Docker container as the local googleapis
            directory.
    Returns:
        The output directory with artman-generated files.
    """
    ARTMAN_CONTAINER_NAME = 'artman-docker'
    input_dir = flags.input_dir
    output_dir = flags.output_dir
    artman_config_dirname = os.path.dirname(flags.config)
    user_config = os.path.join(os.path.expanduser('~'), '.artman')
    docker_image = flags.image

    inner_artman_cmd_str = ' '.join(sys.argv[1:])

    # TODO(ethanbao): Such folder to folder mounting won't work on windows.
    base_cmd = [
        'docker', 'run', '--name', ARTMAN_CONTAINER_NAME, '--rm', '-i', '-t',
        '-e',
        'HOST_USER_ID=%s' % os.getuid(), '-e',
        'HOST_GROUP_ID=%s' % os.getgid(), '-e',
        '%s=True' % RUNNING_IN_ARTMAN_DOCKER_TOKEN, '-v',
        '%s:%s' % (input_dir, input_dir), '-v',
        '%s:%s' % (output_dir, output_dir), '-v',
        '%s:%s' % (artman_config_dirname, artman_config_dirname), '-v',
        '%s:/home/.artman' % user_config, '-w', input_dir, docker_image,
        '/bin/bash', '-c'
    ]

    debug_cmd = list(base_cmd)
    debug_cmd.append('"artman2 %s; bash"' % inner_artman_cmd_str)

    cmd = base_cmd
    cmd.append('artman2 --local %s' % (inner_artman_cmd_str))
    try:
        output = subprocess.check_output(cmd)
        logger.info(output.decode('utf8'))
        return output_dir
    except subprocess.CalledProcessError as e:
        logger.error(e.output.decode('utf8'))
        logger.error(
            'Artman execution failed. For additional logging, re-run the '
            'command with the "--verbose" flag')
        raise
    finally:
        logger.debug(
            'For further inspection inside docker container, run `%s`' %
            ' '.join(debug_cmd))
Example #10
File: github.py  Project: ethanbao/artman
    def execute(self, git_repo, github, branch_name, api_name, api_version,
                language):
        """Create a pull request on GitHub.

        Args:
            git_repo (dict): The target git repository; expects a
                'location' key and an optional 'branch' key (defaults
                to 'master').
            github (dict): GitHub credentials, with 'username' and
                'token' keys.
            branch_name (str): The branch to use as the head of the
                pull request.
            api_name (str): The name of the API. Used in the title of
                the pull request.
            api_version (str): The version of the API. Used in the title of
                the pull request.
            language (str): The name of the language. Used in the title
                of the pull request.
        """
        # Determine the pull request title.
        pr_title = '{language} GAPIC: {api_name} {api_version}'.format(
            api_name=api_name.capitalize(),
            api_version=api_version,
            language=language.capitalize(),
        )

        # Determine the repo owner and name from the location, which is how
        # this API expects to receive this data.
        repo_loc = git_repo['location'].rstrip('/')
        repo_owner, repo_name = repo_loc.split(':')[-1].split('/')[-2:]
        if repo_name.endswith('.git'):
            repo_name = repo_name[:-4]

        # Instantiate the repo object.
        gh = github3.login(github['username'], github['token'])
        repo = gh.repository(repo_owner, repo_name)

        # Create the pull request.
        pr = repo.create_pull(
            base=git_repo.get('branch', 'master'),
            body='This pull request was generated by artman. '
                 'Please review it thoroughly before merging.',
            head=branch_name,
            title=pr_title,
        )

        # If we did not successfully create a pull request, this is an
        # error.
        if not pr:
            logger.error('Failed to create a pull request. You will need to '
                         'create a PR manually.')
            raise RuntimeError('Pull request creation failed.')

        # Log that the PR was created.
        logger.success('Pull request created: {url}'.format(
            url=pr.html_url,
        ))

        # Return the pull request object.
        return pr
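Both `git_repo` and `github` are plain dicts whose expected shape can be read off the code above. A hypothetical call; `task` stands for an instance of the enclosing task class (not shown in this listing), and the repository and credentials are placeholders:

pr = task.execute(
    git_repo={'location': 'git@github.com:googleapis/api-client-staging.git',
              'branch': 'master'},
    github={'username': 'octocat', 'token': '<personal-access-token>'},
    branch_name='pubsub-python-v1',
    api_name='pubsub',
    api_version='v1',
    language='python',
)
print(pr.html_url)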
Example #11
def _run_artman_in_docker(flags):
    """Executes artman command.

    Args:
        flags (argparse.Namespace): Parsed command-line flags.
            flags.root_dir is the input directory that will be mounted
            into the artman Docker container as the local googleapis
            directory.
    Returns:
        The output directory with artman-generated files.
    """
    ARTMAN_CONTAINER_NAME = 'artman-docker'
    root_dir = flags.root_dir
    output_dir = flags.output_dir
    artman_config_dirname = os.path.dirname(flags.config)
    user_config = os.path.join(os.path.expanduser('~'), '.artman')
    docker_image = flags.image

    inner_artman_cmd_str = ' '.join(sys.argv[1:])

    # TODO(ethanbao): Such folder to folder mounting won't work on windows.
    base_cmd = [
        'docker', 'run', '--name', ARTMAN_CONTAINER_NAME, '--rm', '-i', '-t',
        '-e', 'HOST_USER_ID=%s' % os.getuid(),
        '-e', 'HOST_GROUP_ID=%s' % os.getgid(),
        '-e', '%s=True' % RUNNING_IN_ARTMAN_DOCKER_TOKEN,
        '-v', '%s:%s' % (root_dir, root_dir),
        '-v', '%s:%s' % (output_dir, output_dir),
        '-v', '%s:%s' % (artman_config_dirname, artman_config_dirname),
        '-v', '%s:/home/.artman' % user_config,
        '-w', root_dir,
        docker_image, '/bin/bash', '-c'
    ]

    debug_cmd = list(base_cmd)
    debug_cmd.append('"artman %s; bash"' % inner_artman_cmd_str)

    cmd = base_cmd
    cmd.append('artman --local %s' % (inner_artman_cmd_str))
    try:
        output = subprocess.check_output(cmd)
        logger.info(output.decode('utf8'))
        return output_dir
    except subprocess.CalledProcessError as e:
        logger.error(e.output.decode('utf8'))
        logger.error(
            'Artman execution failed. For additional logging, re-run the '
            'command with the "--verbose" flag')
        raise
    finally:
        logger.debug('For further inspection inside docker container, run `%s`'
                     % ' '.join(debug_cmd))
Example #12
    def execute(self, git_repo, github, branch_name, api_name, api_version,
                language):
        """Create a pull request on GitHub.

        Args:
            git_repo (dict): The target git repository; expects a
                'location' key and an optional 'branch' key (defaults
                to 'master').
            github (dict): GitHub credentials, with 'username' and
                'token' keys.
            branch_name (str): The branch to use as the head of the
                pull request.
            api_name (str): The name of the API. Used in the title of
                the pull request.
            api_version (str): The version of the API. Used in the title of
                the pull request.
            language (str): The name of the language. Used in the title
                of the pull request.
        """
        # Determine the pull request title.
        pr_title = '{language} GAPIC: {api_name} {api_version}'.format(
            api_name=api_name.capitalize(),
            api_version=api_version,
            language=language.capitalize(),
        )

        # Determine the repo owner and name from the location, which is how
        # this API expects to receive this data.
        repo_loc = git_repo['location'].rstrip('/')
        repo_owner, repo_name = repo_loc.split(':')[-1].split('/')[-2:]
        if repo_name.endswith('.git'):
            repo_name = repo_name[:-4]

        # Instantiate the repo object.
        gh = github3.login(github['username'], github['token'])
        repo = gh.repository(repo_owner, repo_name)

        # Create the pull request.
        pr = repo.create_pull(
            base=git_repo.get('branch', 'master'),
            body='This pull request was generated by artman. '
            'Please review it thoroughly before merging.',
            head=branch_name,
            title=pr_title,
        )

        # If we did not successfully create a pull request, this is an
        # error.
        if not pr:
            logger.error('Failed to create a pull request. You will need to '
                         'create a PR manually.')
            raise RuntimeError('Pull request creation failed.')

        # Log that the PR was created.
        logger.success('Pull request created: {url}'.format(url=pr.html_url, ))

        # Return the pull request object.
        return pr
Example #13
 def execute(self, bucket_name, path, output_dir):
     client = storage.Client()
     bucket = client.get_bucket(bucket_name)
     blob = bucket.get_blob(path)
     if not blob:
         logger.error('Cannot find the output from GCS.')
         return
     filename = os.path.join(output_dir, path)
     if not os.path.exists(os.path.dirname(filename)):
         os.makedirs(os.path.dirname(filename))
     with open(filename, "w") as f:
         blob.download_to_file(f)
         logger.info('File downloaded to %s.' % f.name)
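A hypothetical invocation of the download task above; the bucket, blob path, and output directory are placeholders, and `task` is an instance of the enclosing class:

# Writes /tmp/artman-output/output/pubsub-v1.tar.gz if the blob exists.
task.execute(bucket_name='my-artman-bucket',
             path='output/pubsub-v1.tar.gz',
             output_dir='/tmp/artman-output')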
Example #14
 def execute(self, bucket_name, path, output_dir):
     client = storage.Client()
     bucket = client.get_bucket(bucket_name)
     blob = bucket.get_blob(path)
     if not blob:
         logger.error('Cannot find the output from GCS.')
         return
     filename = os.path.join(output_dir, path)
     if not os.path.exists(os.path.dirname(filename)):
         os.makedirs(os.path.dirname(filename))
     with io.open(filename, "w", encoding='UTF-8') as f:
         blob.download_to_file(f)
         logger.info('File downloaded to %s.' % f.name)
Example #15
def _parse(artman_yaml_path):
    """Parse artman yaml config into corresponding protobuf."""
    if not os.path.exists(artman_yaml_path):
        raise ValueError(CONFIG_NOT_FOUND_ERROR_MESSAGE_FORMAT % artman_yaml_path)

    try:
        with io.open(artman_yaml_path, 'r', encoding='UTF-8') as f:
            # Convert the YAML into a JSON string, because the protobuf
            # Python library supports parsing protobuf messages from JSON
            # or text format, not YAML.
            artman_config_json_string = json.dumps(yaml.load(f))
        config_pb = Config()
        json_format.Parse(artman_config_json_string, config_pb)
    except (json_format.ParseError, yaml.parser.ParserError):
        logger.error(INVALID_CONFIG_ERROR_MESSAGE_FORMAT % artman_yaml_path)
        raise

    return config_pb
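Note that bare yaml.load(f) is deprecated in newer PyYAML releases in favor of an explicit loader. A behavior-equivalent sketch of the YAML-to-JSON step using yaml.safe_load, which suffices here because an artman config is plain scalars, lists, and maps:

import io
import json

import yaml

def _config_yaml_to_json(artman_yaml_path):
    # Same YAML -> JSON round-trip as in _parse above, with safe_load.
    with io.open(artman_yaml_path, 'r', encoding='UTF-8') as f:
        return json.dumps(yaml.safe_load(f))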
Example #16
File: main.py  Project: saicheems/artman
def _print_log(pipeline_id):
    # Fetch the cloud logging entry if the execution fails. Wait for 30
    # seconds, because it takes a while for the logging to become available.
    logger.critical(
        'The remote pipeline execution failed. It will wait for 30 '
        'seconds before fetching the log for remote pipeline execution.', )
    time.sleep(30)
    client = logging.Client()
    pipeline_logger = client.logger(pipeline_id)
    entries, token = pipeline_logger.list_entries()
    for entry in entries:
        logger.error(entry.payload)

    logger.info(
        'You can always run the following command to fetch the log entry:\n'
        '    gcloud beta logging read "logName=projects/vkit-pipeline/logs/%s"'
        % pipeline_id, )
Example #17
def _parse(artman_yaml_path):
    """Parse artman yaml config into corresponding protobuf."""
    if not os.path.exists(artman_yaml_path):
        raise ValueError(CONFIG_NOT_FOUND_ERROR_MESSAGE_FORMAT % artman_yaml_path)

    try:
        with io.open(artman_yaml_path, 'r', encoding='UTF-8') as f:
            # Convert the YAML into a JSON string, because the protobuf
            # Python library supports parsing protobuf messages from JSON
            # or text format, not YAML.
            artman_config_json_string = json.dumps(yaml.load(f))
        config_pb = Config()
        json_format.Parse(artman_config_json_string, config_pb)
    except (json_format.ParseError, yaml.parser.ParserError):
        logger.error(INVALID_CONFIG_ERROR_MESSAGE_FORMAT % artman_yaml_path)
        raise

    return config_pb
Example #18
def parse_github_credentials(github_config, argv_flags):
    """Determine the appropriate GitHub credentials.

    If there are no valid credentials, error out with a useful message
    so that the user can get credentials.

    Args:
        github_config: The github section of the user configuration
            pulled from the user's configuration file (if any).
        argv_flags (argparse.Namespace): The flags pulled from the command
            line.

    Returns:
        dict: A dictionary with 'username' and 'token' keys.
    """
    # Determine whether we have valid credentials.
    valid = all([
        github_config.username or argv_flags.github_username,
        github_config.token or argv_flags.github_token,
    ])

    # No valid credentials, give a helpful error.
    if not valid:
        logger.critical('No GitHub credentials found.')
        logger.error('Valid GitHub credentials are required if you are '
                     'publishing to GitHub (--publish github).')
        logger.warn('')
        logger.warn('In order to generate the appropriate token, perform '
                    'the following steps:')
        logger.warn('  1. Open https://github.com/settings/tokens')
        logger.warn('  2. Make a new access token with the "repo" scope.')
        logger.warn('  3. Add this structure to ~/.artman/config.yaml: ')
        logger.warn('')
        logger.warn('      github:')
        logger.warn('        username: {username}')
        logger.warn('        token: {token}')
        logger.warn('')
        logger.error('This is a terminal error. Exiting.')
        sys.exit(64)

    # Return the appropriate credentials.
    return {
        'username': argv_flags.github_username or github_config.username,
        'token': argv_flags.github_token or github_config.token,
    }
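The returned credentials prefer command-line flags over the user config. A quick illustration with hypothetical stand-ins for the config and flags objects:

import argparse

class _FakeGithubConfig(object):
    # Stand-in for the github section of the parsed user config.
    username = 'config-user'
    token = 'config-token'

flags = argparse.Namespace(github_username='cli-user', github_token=None)
creds = parse_github_credentials(_FakeGithubConfig(), flags)
# creds == {'username': 'cli-user', 'token': 'config-token'}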
Example #19
def parse_github_credentials(config, argv_flags):
    """Determine the appropriate GitHub credentials.

    If there are no valid credentials, error out with a useful message
    so that the user can get credentials.

    Args:
        config (dict): The user github configuration pulled from the user's
            configuration file (if any).
        argv_flags (argparse.Namespace): The flags pulled from the command
            line.

    Returns:
        dict: A dictionary with 'username' and 'token' keys.
    """
    # Determine whether we have valid credentials.
    valid = all([
        'username' in config or argv_flags.github_username,
        'token' in config or argv_flags.github_token,
    ])

    # No valid credentials, give a helpful error.
    if not valid:
        logger.critical('No GitHub credentials found.')
        logger.error('Valid GitHub credentials are required if you are '
                     'publishing to GitHub (--publish github).')
        logger.warn('')
        logger.warn('In order to generate the appropriate token, perform '
                    'the following steps:')
        logger.warn('  1. Open https://github.com/settings/tokens')
        logger.warn('  2. Make a new access token with the "repo" scope.')
        logger.warn('  3. Add this structure to ~/.artman/config.yaml: ')
        logger.warn('')
        logger.warn('      github:')
        logger.warn('        username: {username}')
        logger.warn('        token: {token}')
        logger.warn('')
        logger.error('This is a terminal error. Exiting.')
        sys.exit(64)

    # Return the appropriate credentials.
    return {
        'username': argv_flags.github_username or config['username'],
        'token': argv_flags.github_token or config['token'],
    }
Example #20
def _configure_publish(publish=None):
    """Determine and return the default publisher.

    Args:
        publish (str): The current default publisher (may be None).

    Returns:
        str: The new default publisher.
    """
    # Set up publishing defaults.
    logger.info('Where do you want to publish code by default?')
    logger.info('The common valid options are "github" and "local".')
    publish = six.moves.input('Default publisher: ').lower()
    try:
        importlib.import_module('artman.tasks.publish.%s' % publish)
        return publish
    except ImportError:
        logger.error('Invalid publisher.')
        return _configure_publish()
Example #21
File: main.py  Project: shinfan/artman
def _print_log(pipeline_id):
    # Fetch the cloud logging entry if the execution fails. Wait for 30
    # seconds, because it takes a while for the logging to become available.
    logger.critical(
        'The remote pipeline execution failed. It will wait for 30 '
        'seconds before fetching the log for remote pipeline execution.',
    )
    time.sleep(30)
    client = logging.Client()
    pipeline_logger = client.logger(pipeline_id)
    entries, token = pipeline_logger.list_entries()
    for entry in entries:
        logger.error(entry.payload)

    logger.info(
        'You can always run the following command to fetch the log entry:\n'
        '    gcloud beta logging read "logName=projects/vkit-pipeline/logs/%s"'
        % pipeline_id,
    )
Example #22
def read_user_config(artman_user_config_path):
    """Parse and return artman config"""
    config_pb = UserConfig()
    artman_user_config_path = os.path.expanduser(artman_user_config_path)
    if not os.path.isfile(artman_user_config_path):
        logger.warn(
            'No artman user config defined. Using the default config for '
            'this execution. Run `configure-artman` to set up a user config.')
        return config_pb

    try:
        with io.open(artman_user_config_path, 'r', encoding='UTF-8') as f:
            # Convert the YAML into a JSON string, because the protobuf
            # Python library supports parsing protobuf messages from JSON
            # or text format, not YAML.
            json_string = json.dumps(yaml.load(f))
        json_format.Parse(json_string, config_pb)
    except (json_format.ParseError, yaml.parser.ParserError):
        logger.error(INVALID_USER_CONFIG_ERROR_MESSAGE_FORMAT % artman_user_config_path)
        raise

    return config_pb
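Based on the fields referenced elsewhere in this listing (user_config.local.toolkit, user_config.github.username, user_config.github.token), a minimal ~/.artman/config.yaml would look roughly like this; the path and credentials are placeholders:

local:
  toolkit: /path/to/toolkit
github:
  username: your-github-username
  token: your-github-token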
Example #24
def check_docker_requirements(docker_image):
    """Checks whether all docker-related components have been installed."""

    try:
        subprocess.check_output(['docker', '--version'])
    except OSError:
        logger.error(
            'Docker not found on path or is not installed. Refer to '
            'https://docs.docker.com/engine/installation for how to install '
            'Docker on your local machine.')
        sys.exit(128)

    try:
        output = subprocess.check_output(
            ['docker', 'images', '-q', docker_image])
        if not output:
            logger.error(
                'Cannot find artman Docker image. Run `docker pull %s` to '
                'pull the image.' % docker_image)
            sys.exit(128)
    except OSError:
        logger.error('Docker image check failed. Please file an issue at '
                     'https://github.com/googleapis/artman/issues.')
        sys.exit(128)
Example #25
def check_docker_requirements(docker_image):
    """Checks whether all docker-related components have been installed."""

    try:
        subprocess.check_output(['docker', '--version'])
    except OSError:
        logger.error(
            'Docker not found on path or is not installed. Refer to '
            'https://docs.docker.com/engine/installation for how to install '
            'Docker on your local machine.')
        sys.exit(128)

    try:
        output = subprocess.check_output(
            ['docker', 'images', '-q', docker_image])
        if not output:
            logger.error(
                'Cannot find artman Docker image. Run `docker pull %s` to '
                'pull the image.' % docker_image)
            sys.exit(128)
    except OSError:
        logger.error('Docker image check failed. Please file an issue at '
                     'https://github.com/googleapis/artman/issues.')
        sys.exit(128)
Example #26
File: main2.py  Project: Landrito/artman
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
                            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    if flags.input_dir:
        flags.input_dir = os.path.abspath(flags.input_dir)
    flags.output_dir = os.path.abspath(flags.output_dir)
    flags.config = os.path.abspath(flags.config)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(
        user_config, flags.input_dir)

    # Save the input directory back to flags if it was not explicitly set.
    if not flags.input_dir:
        flags.input_dir = pipeline_args['local_paths']['googleapis']

    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.input_dir)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    shared_config_name = 'common.yaml'
    if artifact_config.language in (Artifact.RUBY, Artifact.NODEJS,):
        shared_config_name = 'doc.yaml'

    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, flags.input_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))
    tmp_legacy_config_yaml = '%s.tmp' % artman_config_path
    with io.open(tmp_legacy_config_yaml, 'w') as outfile:
        yaml.dump(legacy_config_dict, outfile, default_flow_style=False)

    googleapis = os.path.realpath(
        os.path.expanduser(
            pipeline_args['local_paths']['googleapis'], ))
    config = ','.join([
        '{artman_config_path}',
        '{googleapis}/gapic/lang/{shared_config_name}',
    ]).format(
        artman_config_path=tmp_legacy_config_yaml,
        googleapis=googleapis,
        shared_config_name=shared_config_name,
    )

    language = Artifact.Language.Name(
        artifact_config.language).lower()

    # Set the pipeline as well as package_type and packaging
    artifact_type = artifact_config.type
    if artifact_type in (Artifact.GAPIC, Artifact.GAPIC_ONLY):
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type in (Artifact.GRPC, Artifact.GRPC_COMMON):
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    # Note: the var replacement is still needed because the variables are
    # still used in some shared/common config yamls.
    config_sections = ['common']
    for config_spec in config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={
                k.upper(): v
                for k, v in pipeline_args['local_paths'].items()
            },
            language=language, )
        pipeline_args.update(config_args)

    # Setup publishing related config if needed.
    if flags.subcommand == 'generate':
        pipeline_args['publish'] = 'noop'
    elif flags.subcommand == 'publish':
        publishing_config = _get_publishing_config(artifact_config,
                                                   flags.target)
        if publishing_config.type == Artifact.PublishTarget.GITHUB:
            pipeline_args['publish'] = 'local' if flags.dry_run else 'github'
            pipeline_args['github'] = support.parse_github_credentials(
                argv_flags=flags,
                config=user_config.get('github', {}), )
            repos = pipeline_args.pop('git_repos')
            pipeline_args['git_repo'] = support.select_git_repo(
                repos, publishing_config.name)
        else:
            logger.error(
                'Publishing type `%s` is not supported yet.' %
                Artifact.PublishTarget.Type.Name(publishing_config.type))
            sys.exit(96)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2, )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Clean up the tmp legacy artman config.
    os.remove(tmp_legacy_config_yaml)

    # Return the final arguments.
    return pipeline_name, pipeline_args
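The redaction loop near the end rewrites any dumped line that contains 'token' before logging it. For example, with an illustrative value:

line = 'github_token: abc123'
index = line.index(':')
line = line[:index + 2] + '<< REDACTED >>'
# line == 'github_token: << REDACTED >>'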
Example #27
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
                            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    flags.input_dir = os.path.abspath(flags.input_dir)
    flags.output_dir = os.path.abspath(flags.output_dir)
    flags.config = os.path.abspath(flags.config)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(
        user_config, flags.input_dir)

    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.input_dir)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    shared_config_name = 'common.yaml'
    if artifact_config.language in (Artifact.RUBY, Artifact.NODEJS,):
        shared_config_name = 'doc.yaml'

    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, flags.input_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))
    tmp_legacy_config_yaml = '%s.tmp' % artman_config_path
    with io.open(tmp_legacy_config_yaml, 'w') as outfile:
        yaml.dump(legacy_config_dict, outfile, default_flow_style=False)

    googleapis = os.path.realpath(
        os.path.expanduser(
            pipeline_args['local_paths']['googleapis'], ))
    config = ','.join([
        '{artman_config_path}',
        '{googleapis}/gapic/lang/{shared_config_name}',
    ]).format(
        artman_config_path=tmp_legacy_config_yaml,
        googleapis=googleapis,
        shared_config_name=shared_config_name, )

    language = Artifact.Language.Name(
        artifact_config.language).lower()

    # Set the pipeline as well as package_type and packaging
    artifact_type = artifact_config.type
    if artifact_type in (Artifact.GAPIC, Artifact.GAPIC_ONLY):
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type in (Artifact.GRPC, Artifact.GRPC_COMMON):
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    # Note: the var replacement is still needed because the variables are
    # still used in some shared/common config yamls.
    config_sections = ['common']
    for config_spec in config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={
                k.upper(): v
                for k, v in pipeline_args['local_paths'].items()
            },
            language=language, )
        pipeline_args.update(config_args)

    # Setup publishing related config if needed.
    if flags.subcommand == 'generate':
        pipeline_args['publish'] = 'noop'
    elif flags.subcommand == 'publish':
        publishing_config = _get_publishing_config(artifact_config,
                                                   flags.target)
        if publishing_config.type == Artifact.PublishTarget.GITHUB:
            pipeline_args['publish'] = 'local' if flags.dry_run else 'github'
            pipeline_args['github'] = support.parse_github_credentials(
                argv_flags=flags,
                config=user_config.get('github', {}), )
            repos = pipeline_args.pop('git_repos')
            pipeline_args['git_repo'] = support.select_git_repo(
                repos, publishing_config.name)
        else:
            logger.error(
                'Publishing type `%s` is not supported yet.' %
                Artifact.PublishTarget.Type.Name(publishing_config.type))
            sys.exit(96)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2, )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Clean up the tmp legacy artman config.
    os.remove(tmp_legacy_config_yaml)

    # Return the final arguments.
    return pipeline_name, pipeline_args
Example #28
File: main.py  Project: znutsd37/artman
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
                            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    pipeline_args['toolkit_path'] = user_config.local.toolkit
    pipeline_args['generator_args'] = flags.generator_args

    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.aspect)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))

    language = Artifact.Language.Name(
        artifact_config.language).lower()

    # Set the pipeline
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    pipeline_args['aspect'] = Artifact.Aspect.Name(artifact_config.aspect)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
        if os.path.abspath(flags.output_dir) != os.path.abspath(DEFAULT_OUTPUT_DIR):
            logger.warning('`output_dir` is ignored in DiscoGapicConfigGen. '
                           'Yamls are saved at the path specified by `gapic_yaml`.')
        pipeline_args['output_dir'] = tempfile.mkdtemp()
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    config_args = config_util.load_config_spec(legacy_config_dict, language)
    config_args.update(pipeline_args)
    pipeline_args = config_args
    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2, )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    return pipeline_name, pipeline_args
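The artifact-type dispatch above could equivalently be table-driven. A sketch using only the mappings visible in the branches, with the per-type language and discovery-doc handling omitted; this is a hypothetical refactor, not artman code:

PIPELINE_BY_ARTIFACT_TYPE = {
    Artifact.GAPIC_ONLY: 'GapicOnlyClientPipeline',
    Artifact.GAPIC: 'GapicClientPipeline',
    Artifact.DISCOGAPIC: 'DiscoGapicClientPipeline',
    Artifact.GRPC: 'GrpcClientPipeline',
    Artifact.GAPIC_CONFIG: 'GapicConfigPipeline',
    Artifact.DISCOGAPIC_CONFIG: 'DiscoGapicConfigPipeline',
    Artifact.PROTOBUF: 'ProtoClientPipeline',
}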
Example #29
File: main.py  Project: saicheems/artman
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from ~/.artman/config.yaml.

    Returns:
        tuple (str, dict, str): 3-tuple containing:
            - pipeline name
            - pipeline arguments
            - 'remote' or None
    """
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(
        user_config, flags.googleapis)

    # In most cases, we get a language.
    if flags.language:
        pipeline_args['language'] = flags.language
    elif flags.pipeline_name != 'GapicConfigPipeline':
        logger.critical('--language is required for every pipeline except '
                        'GapicConfigPipeline.')
        sys.exit(64)

    # If this is remote execution, configure that.
    if flags.remote:
        pipeline_id = str(uuid.uuid4())
        # Use a unique random temp directory for remote execution.
        # TODO(ethanbao): Let remote artman decide the temp directory.
        pipeline_args['local_paths']['reporoot'] = '/tmp/artman/{id}'.format(
            id=pipeline_id, )
        pipeline_args['pipeline_id'] = pipeline_id

    # Specify the default pipeline settings - this may change if
    # BATCH is set
    default_pipeline = 'GapicClientPipeline'

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    if flags.api:
        shared_config_name = 'common.yaml'
        if flags.language in (
                'ruby',
                'nodejs',
        ):
            shared_config_name = 'doc.yaml'

        googleapis = os.path.realpath(
            os.path.expanduser(pipeline_args['local_paths']['googleapis'], ))
        flags.config = ','.join([
            '{googleapis}/gapic/api/artman_{api}.yaml',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            api=flags.api,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )
    elif flags.batch:
        googleapis = os.path.realpath(
            os.path.expanduser(pipeline_args['local_paths']['googleapis'], ))
        flags.config = '{googleapis}/gapic/batch/common.yaml'.format(
            googleapis=googleapis, )
        default_pipeline = 'GapicClientBatchPipeline'
        if not flags.publish:
            # If publish flag was not set by the user, set it here.
            # This prevents the user config yaml from causing a
            # publish event when batch mode is used.
            flags.publish = 'noop'
        if flags.target:
            logger.critical('--target and --batch cannot both be specified; '
                            'when using --batch, the repo must be the default '
                            'specified in the artman config yaml file (or '
                            'staging if no default is provided).')
            sys.exit(64)

    # Set the pipeline if none was specified
    if not flags.pipeline_name:
        flags.pipeline_name = default_pipeline

    # Determine where to publish.
    pipeline_args['publish'] = support.resolve(
        'publish',
        user_config,
        flags,
        default='local',
    )

    # Parse out the GitHub credentials iff we are publishing to GitHub.
    if pipeline_args['publish'] == 'github':
        pipeline_args['github'] = support.parse_github_credentials(
            argv_flags=flags,
            config=user_config.get('github', {}),
        )

    # Parse out the full configuration.
    config_sections = ['common']
    for config_spec in flags.config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={
                k.upper(): v
                for k, v in pipeline_args['local_paths'].items()
            },
            language=flags.language,
        )
        pipeline_args.update(config_args)

    # Add any arbitrary keyword arguments.
    if flags.pipeline_kwargs != '{}':
        logger.warn('The use of --pipeline-kwargs is discouraged.')
        cmd_args = ast.literal_eval(flags.pipeline_kwargs)
        pipeline_args.update(cmd_args)

    # Coerce `git_repos` and `target_repo` into a single git_repo.
    if pipeline_args['publish'] in ('github', 'local') and not flags.batch:
        # Temporarily give our users a nice error if they have an older
        # googleapis checkout.
        # DEPRECATED: 2017-04-20
        # REMOVE: 2017-05-20
        if 'git_repo' in pipeline_args:
            logger.error('Your git repos are configured in your artman YAML '
                         'using an older format. Please git pull.')
            sys.exit(96)

        # Pop the git repos off of the pipeline args and select the
        # correct one.
        repos = pipeline_args.pop('git_repos')
        pipeline_args['git_repo'] = support.select_git_repo(
            repos, flags.target)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    # This includes a pipeline to run, arguments, and whether to run remotely.
    return (
        flags.pipeline_name,
        pipeline_args,
        'remote' if flags.remote else None,
    )
Example #30
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
                            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    # TODO two args reference the same concept - clean this up
    pipeline_args['toolkit'] = user_config.local.toolkit
    pipeline_args['toolkit_path'] = user_config.local.toolkit

    if flags.subcommand == 'publish' and flags.local_repo_dir:
        if not flags.dry_run:
            logger.error('`--dry-run` flag must be passed when '
                         '`--local-repo-dir` is specified')
            sys.exit(96)
        flags.local_repo_dir = os.path.abspath(flags.local_repo_dir)
        pipeline_args['local_repo_dir'] = flags.local_repo_dir

    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error('Artman config file `%s` doesn\'t exist.' %
                     artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(artman_config_path,
                                                      flags.artifact_name)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))

    language = Artifact.Language.Name(artifact_config.language).lower()

    # Set the pipeline
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    config_args = config_util.load_config_spec(legacy_config_dict, language)
    pipeline_args.update(config_args)

    # Setup publishing related config if needed.
    if flags.subcommand == 'generate':
        pipeline_args['publish'] = 'noop'
    elif flags.subcommand == 'publish':
        publishing_config = _get_publishing_config(artifact_config,
                                                   flags.target)
        if publishing_config.type == Artifact.PublishTarget.GITHUB:
            if flags.dry_run:
                pipeline_args['publish'] = 'local'
            else:
                pipeline_args['publish'] = 'github'
                pipeline_args['github'] = support.parse_github_credentials(
                    argv_flags=flags, github_config=user_config.github)
            repos = pipeline_args.pop('git_repos')
            pipeline_args['git_repo'] = support.select_git_repo(
                repos, publishing_config.name)
        else:
            logger.error(
                'Publishing type `%s` is not supported yet.' %
                Artifact.PublishTarget.Type.Name(publishing_config.type))
            sys.exit(96)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    return pipeline_name, pipeline_args
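The if/elif chain that picks the pipeline recurs in every variant below; it could equally be written as a lookup table. A sketch, assuming the same `Artifact` proto enum the snippets already reference: the pipeline names are copied from the chain, and the two booleans record whether an entry needs `language` and `discovery_doc`. `select_pipeline` is a hypothetical helper name, not artman's API.

PIPELINES = {
    Artifact.GAPIC_ONLY: ('GapicOnlyClientPipeline', True, False),
    Artifact.GAPIC: ('GapicClientPipeline', True, False),
    Artifact.DISCOGAPIC: ('DiscoGapicClientPipeline', True, True),
    Artifact.GRPC: ('GrpcClientPipeline', True, False),
    Artifact.GAPIC_CONFIG: ('GapicConfigPipeline', False, False),
    Artifact.DISCOGAPIC_CONFIG: ('DiscoGapicConfigPipeline', False, True),
    Artifact.PROTOBUF: ('ProtoClientPipeline', True, False),
}

def select_pipeline(artifact_config, language, pipeline_args):
    try:
        name, wants_language, wants_discovery = PIPELINES[artifact_config.type]
    except KeyError:
        raise ValueError('Unrecognized artifact.')
    if wants_language:
        pipeline_args['language'] = language
    if wants_discovery:
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    return name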
Example #31
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
                            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    pipeline_args['toolkit_path'] = user_config.local.toolkit

    if flags.subcommand == 'publish' and flags.local_repo_dir:
        if not flags.dry_run:
            logger.error('`--dry-run` flag must be passed when '
                         '`--local-repo-dir` is specified')
            sys.exit(96)
        flags.local_repo_dir = os.path.abspath(flags.local_repo_dir)
        pipeline_args['local_repo_dir'] = flags.local_repo_dir

    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name, flags.aspect)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))

    language = Artifact.Language.Name(
        artifact_config.language).lower()

    # Set the pipeline
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    pipeline_args['aspect'] = Artifact.Aspect.Name(artifact_config.aspect)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    config_args = config_util.load_config_spec(legacy_config_dict, language)
    pipeline_args.update(config_args)

    # Setup publishing related config if needed.
    if flags.subcommand == 'generate':
        pipeline_args['publish'] = 'noop'
    elif flags.subcommand == 'publish':
        publishing_config = _get_publishing_config(artifact_config,
                                                   flags.target)
        if publishing_config.type == Artifact.PublishTarget.GITHUB:
            if flags.dry_run:
                pipeline_args['publish'] = 'local'
            else:
                pipeline_args['publish'] = 'github'
                pipeline_args['github'] = support.parse_github_credentials(
                    argv_flags=flags,
                    github_config=user_config.github)
            repos = pipeline_args.pop('git_repos')
            pipeline_args['git_repo'] = support.select_git_repo(
                repos, publishing_config.name)
        else:
            logger.error(
                'Publishing type `%s` is not supported yet.' %
                Artifact.PublishTarget.Type.Name(publishing_config.type))
            sys.exit(96)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    return pipeline_name, pipeline_args
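This variant differs from #30 mainly in threading an `aspect` argument through `load_artifact_config` and into `pipeline_args`. The `Artifact.*.Name()` calls are the standard generated-protobuf enum helpers; a sketch of the round-trip, assuming artman's `Artifact` proto defines the values shown (the import path is also an assumption):

from artman.config.proto.config_pb2 import Artifact  # path is an assumption

Artifact.Type.Name(Artifact.GAPIC)                    # -> 'GAPIC'
Artifact.Type.Value('GAPIC')                          # -> Artifact.GAPIC
Artifact.Language.Name(Artifact.PYTHON).lower()       # -> 'python'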
Example #32
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
                            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict): 2-tuple containing:
            - pipeline name
            - pipeline arguments
    """
    if flags.root_dir:
        flags.root_dir = os.path.abspath(flags.root_dir)
        flags.config = os.path.join(flags.root_dir, flags.config)
    else:
        flags.root_dir = os.getcwd()
        flags.config = os.path.abspath(flags.config)
    root_dir = flags.root_dir
    flags.output_dir = os.path.abspath(flags.output_dir)
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = INFO
    if getattr(flags, 'verbosity', None):
        verbosity = getattr(flags, 'verbosity')
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['root_dir'] = root_dir
    pipeline_args['toolkit'] = user_config.local.toolkit

    if flags.subcommand == 'publish' and flags.local_repo_dir:
        if not flags.dry_run:
            logger.error('`--dry-run` flag must be passed when '
                         '`--local-repo-dir` is specified')
            sys.exit(96)
        flags.local_repo_dir = os.path.abspath(flags.local_repo_dir)
        pipeline_args['local_repo_dir'] = flags.local_repo_dir

    artman_config_path = flags.config
    if not os.path.isfile(artman_config_path):
        logger.error(
            'Artman config file `%s` doesn\'t exist.' % artman_config_path)
        sys.exit(96)

    try:
        artifact_config = loader.load_artifact_config(
            artman_config_path, flags.artifact_name)
    except ValueError as ve:
        logger.error('Artifact config loading failed with `%s`' % ve)
        sys.exit(96)

    # Pick the shared config for the language: Ruby and Node.js use the
    # doc.yaml shared config, everything else uses common.yaml.
    shared_config_name = 'common.yaml'
    if artifact_config.language in (Artifact.RUBY, Artifact.NODEJS,):
        shared_config_name = 'doc.yaml'

    legacy_config_dict = converter.convert_to_legacy_config_dict(
        artifact_config, root_dir, flags.output_dir)
    logger.debug('Below is the legacy config after conversion:\n%s' %
                 pprint.pformat(legacy_config_dict))
    tmp_legacy_config_yaml = '%s.tmp' % artman_config_path
    with io.open(tmp_legacy_config_yaml, 'w') as outfile:
        yaml.dump(legacy_config_dict, outfile, default_flow_style=False)

    config = ','.join([
        '{artman_config_path}',
        '{googleapis}/gapic/lang/{shared_config_name}',
    ]).format(
        artman_config_path=tmp_legacy_config_yaml,
        googleapis=root_dir,
        shared_config_name=shared_config_name,
    )

    language = Artifact.Language.Name(
        artifact_config.language).lower()

    # Set the pipeline
    artifact_type = artifact_config.type
    pipeline_args['artifact_type'] = Artifact.Type.Name(artifact_type)
    if artifact_type == Artifact.GAPIC_ONLY:
        pipeline_name = 'GapicOnlyClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC:
        pipeline_name = 'GapicClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.DISCOGAPIC:
        pipeline_name = 'DiscoGapicClientPipeline'
        pipeline_args['language'] = language
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.GRPC:
        pipeline_name = 'GrpcClientPipeline'
        pipeline_args['language'] = language
    elif artifact_type == Artifact.GAPIC_CONFIG:
        pipeline_name = 'GapicConfigPipeline'
    elif artifact_type == Artifact.DISCOGAPIC_CONFIG:
        pipeline_name = 'DiscoGapicConfigPipeline'
        pipeline_args['discovery_doc'] = artifact_config.discovery_doc
    elif artifact_type == Artifact.PROTOBUF:
        pipeline_name = 'ProtoClientPipeline'
        pipeline_args['language'] = language
    else:
        raise ValueError('Unrecognized artifact.')

    # Parse out the full configuration.
    # Note: the var replacement is still needed because they are still being
    # used in some shared/common config yamls.
    config_sections = ['common']
    for config_spec in config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={
                'GOOGLEAPIS': root_dir,
                'DISCOVERY_ARTIFACT_MANAGER': root_dir,
                'TOOLKIT': user_config.local.toolkit
            },
            language=language, )
        pipeline_args.update(config_args)

    # Setup publishing related config if needed.
    if flags.subcommand == 'generate':
        pipeline_args['publish'] = 'noop'
    elif flags.subcommand == 'publish':
        publishing_config = _get_publishing_config(artifact_config,
                                                   flags.target)
        if publishing_config.type == Artifact.PublishTarget.GITHUB:
            if flags.dry_run:
                pipeline_args['publish'] = 'local'
            else:
                pipeline_args['publish'] = 'github'
                pipeline_args['github'] = support.parse_github_credentials(
                    argv_flags=flags,
                    github_config=user_config.github)
            repos = pipeline_args.pop('git_repos')
            pipeline_args['git_repo'] = support.select_git_repo(
                repos, publishing_config.name)
        else:
            logger.error(
                'Publishing type `%s` is not supported yet.' %
                Artifact.PublishTarget.Type.Name(publishing_config.type))
            sys.exit(96)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Clean up the tmp legacy artman config.
    os.remove(tmp_legacy_config_yaml)

    # Return the final arguments.
    return pipeline_name, pipeline_args
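This older variant still round-trips through a temporary legacy YAML dump and a comma-joined config spec. With hypothetical paths, the spec built above comes out as sketched below; note also that the temp file is only removed at the very end, so the `sys.exit(96)` paths reached after it is written would leave it behind.

# Sketch of the comma-joined spec above, with hypothetical paths.
root_dir = '/home/user/googleapis'
tmp_legacy_config_yaml = root_dir + '/artman_pubsub.yaml.tmp'
config = ','.join([
    tmp_legacy_config_yaml,
    '{googleapis}/gapic/lang/common.yaml'.format(googleapis=root_dir),
])
print(config)
# /home/user/googleapis/artman_pubsub.yaml.tmp,/home/user/googleapis/gapic/lang/common.yaml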
Example #33
File: main.py Project: shinfan/artman
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from ~/.artman/config.yaml.

    Returns:
        tuple (str, dict, str): 3-tuple containing:
            - pipeline name
            - pipeline arguments
            - 'remote' or None
    """
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(user_config,
                                                             flags.googleapis)

    # In most cases, we get a language.
    if flags.language:
        pipeline_args['language'] = flags.language
    elif flags.pipeline_name != 'GapicConfigPipeline':
        logger.critical('--language is required for every pipeline except '
                        'GapicConfigPipeline.')
        sys.exit(64)

    # If this is remote execution, configure that.
    if flags.remote:
        pipeline_id = str(uuid.uuid4())
        # Use a unique random temp directory for remote execution.
        # TODO(ethanbao): Let remote artman decide the temp directory.
        pipeline_args['local_paths']['reporoot'] = '/tmp/artman/{id}'.format(
            id=pipeline_id,
        )
        pipeline_args['pipeline_id'] = pipeline_id

    # Specify the default pipeline settings - this may change if
    # BATCH is set
    default_pipeline = 'GapicClientPipeline'

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    if flags.api:
        shared_config_name = 'common.yaml'
        if flags.language == 'ruby':
            shared_config_name = 'doc.yaml'

        googleapis = os.path.realpath(os.path.expanduser(
            pipeline_args['local_paths']['googleapis'],
        ))
        flags.config = ','.join([
            '{googleapis}/gapic/api/artman_{api}.yaml',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            api=flags.api,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )
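        # With hypothetical values api='pubsub' and
        # googleapis='/home/user/googleapis', flags.config becomes:
        #   '/home/user/googleapis/gapic/api/artman_pubsub.yaml,'
        #   '/home/user/googleapis/gapic/lang/common.yaml'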
    elif flags.batch:
        googleapis = os.path.realpath(os.path.expanduser(
            pipeline_args['local_paths']['googleapis'],
        ))
        flags.config = '{googleapis}/gapic/batch/common.yaml'.format(
            googleapis=googleapis,
        )
        default_pipeline = 'GapicClientBatchPipeline'
        if not flags.publish:
            # If publish flag was not set by the user, set it here.
            # This prevents the user config yaml from causing a
            # publish event when batch mode is used.
            flags.publish = 'noop'
        if flags.target:
            logger.critical('--target and --batch cannot both be specified; '
                            'when using --batch, the repo must be the default '
                            'specified in the artman config yaml file (or '
                            'staging if no default is provided).')
            sys.exit(64)

    # Set the pipeline if none was specified
    if not flags.pipeline_name:
        flags.pipeline_name = default_pipeline

    # Determine where to publish.
    pipeline_args['publish'] = support.resolve('publish', user_config, flags,
        default='local',
    )

    # Parse out the GitHub credentials iff we are publishing to GitHub.
    if pipeline_args['publish'] == 'github':
        pipeline_args['github'] = support.parse_github_credentials(
            argv_flags=flags,
            config=user_config.get('github', {}),
        )

    # Parse out the full configuration.
    config_sections = ['common']
    for config_spec in flags.config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={k.upper(): v for k, v in
                       pipeline_args['local_paths'].items()},
            language=flags.language,
        )
        pipeline_args.update(config_args)

    # Add any arbitrary keyword arguments.
    if flags.pipeline_kwargs != '{}':
        logger.warning('The use of --pipeline-kwargs is discouraged.')
        cmd_args = ast.literal_eval(flags.pipeline_kwargs)
        pipeline_args.update(cmd_args)

    # Coerce `git_repos` and `target_repo` into a single git_repo.
    if pipeline_args['publish'] in ('github', 'local') and not flags.batch:
        # Temporarily give our users a nice error if they have an older
        # googleapis checkout.
        # DEPRECATED: 2017-04-20
        # REMOVE: 2017-05-20
        if 'git_repo' in pipeline_args:
            logger.error('Your git repos are configured in your artman YAML '
                         'using an older format. Please git pull.')
            sys.exit(96)

        # Pop the git repos off of the pipeline args and select the
        # correct one.
        repos = pipeline_args.pop('git_repos')
        pipeline_args['git_repo'] = support.select_git_repo(repos, flags.target)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info('  {0}'.format(line))

    # Return the final arguments.
    # This includes a pipeline to run, arguments, and whether to run remotely.
    return (
        flags.pipeline_name,
        pipeline_args,
        'remote' if flags.remote else None,
    )
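`support.resolve` is what gives command-line flags precedence over the user config in this earliest variant. A plausible sketch inferred from the call sites above, not artman's actual implementation:

def resolve(name, user_config, flags, default=None):
    # Flag value wins, then the user-config entry, then the default
    # (precedence inferred from the call sites above; the real helper
    # may differ).
    value = getattr(flags, name, None)
    if value:
        return value
    if isinstance(user_config, dict) and user_config.get(name):
        return user_config[name]
    return default

# e.g. verbosity = resolve('verbosity', user_config, flags, default=INFO)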