def read_user_config(flags):
    """Read the user config from disk and return it.

    Args:
        flags (argparse.Namespace): The flags from sys.argv.

    Returns:
        dict: The user config.
    """
    # Load the user configuration if it exists and save a dictionary.
    user_config = {}
    user_config_file = os.path.realpath(os.path.expanduser(flags.user_config))
    if os.path.isfile(user_config_file):
        with io.open(user_config_file) as ucf:
            user_config = yaml.load(ucf.read(), Loader=yaml.Loader) or {}

    # Sanity check: Is there a configuration? If not, abort.
    if not user_config:
        setup_logging(INFO)
        logger.critical('No user configuration found.')
        logger.warn('This is probably your first time running Artman.')
        logger.warn('Run `configure-artman` to get yourself set up.')
        sys.exit(64)

    # Done; return the user config.
    return user_config

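# Illustrative note (not part of the original module): ``flags.user_config`` is
# expected to point at a plain YAML file, typically ~/.artman/config.yaml. Based
# on the structure printed by parse_github_credentials below, a minimal file
# might look like:
#
#     github:
#       username: {username}
#       token: {token}
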
def protoc_common_resources_params(root_dir, common_resources=None):
    """Return the protoc include flags and file paths for common resources protos.

    Falls back to google/cloud/common_resources.proto under ``root_dir`` when no
    explicit list of resources is given.
    """
    resources = common_resources
    if resources is None:
        default = os.path.join("google", "cloud", "common_resources.proto")
        resources = [default]
    common_resources_includes = []
    common_resources_paths = []
    for res in resources:
        path = os.path.join(root_dir, res)
        if os.path.exists(path):
            common_resources_paths.append(path)
            common_resources_includes.append("-I{}={}".format(res, path))
        else:
            logger.warn("Could not find resources file at: {}".format(path))
    return (common_resources_includes, common_resources_paths)

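# Illustrative sketch (not part of the original module): the two lists returned
# above might be spliced into a protoc command line, include mappings first and
# the resolved proto paths last. The 'protoc' binary name and this helper are
# assumptions for illustration only.
def _example_protoc_args(root_dir):
    includes, paths = protoc_common_resources_params(root_dir)
    return ['protoc'] + includes + paths
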
def read_user_config(artman_user_config_path):
    """Parse and return the artman user config."""
    config_pb = UserConfig()
    artman_user_config_path = os.path.expanduser(artman_user_config_path)
    if not os.path.isfile(artman_user_config_path):
        logger.warn(
            'No artman user config defined. Use the default one for this '
            'execution. Run `configure-artman` to set up user config.')
        return config_pb

    try:
        with io.open(artman_user_config_path, 'r', encoding='UTF-8') as f:
            # Convert the YAML into a JSON string, because the protobuf Python
            # library supports parsing protobufs from JSON or text format, but
            # not from YAML.
            json_string = json.dumps(yaml.load(f, Loader=yaml.Loader))
            json_format.Parse(json_string, config_pb)
    except (json_format.ParseError, yaml.parser.ParserError):
        logger.error(INVALID_USER_CONFIG_ERROR_MESSAGE_FORMAT %
                     artman_user_config_path)
        raise
    return config_pb

def parse_github_credentials(config, argv_flags):
    """Determine the appropriate GitHub credentials.

    If there are no valid credentials, error out with a useful message so that
    the user can get credentials.

    Args:
        config (dict): The user github configuration pulled from the user's
            configuration file (if any).
        argv_flags (argparse.Namespace): The flags pulled from the command
            line.

    Returns:
        dict: A dictionary with 'username' and 'token' keys.
    """
    # Determine whether we have valid credentials.
    valid = all([
        'username' in config or argv_flags.github_username,
        'token' in config or argv_flags.github_token,
    ])

    # No valid credentials, give a helpful error.
    if not valid:
        logger.critical('No GitHub credentials found.')
        logger.error('Valid GitHub credentials are required if you are '
                     'publishing to GitHub (--publish github).')
        logger.warn('')
        logger.warn('In order to generate the appropriate token, perform '
                    'the following steps:')
        logger.warn(' 1. Open https://github.com/settings/tokens')
        logger.warn(' 2. Make a new access token with the "repo" scope.')
        logger.warn(' 3. Add this structure to ~/.artman/config.yaml: ')
        logger.warn('')
        logger.warn(' github:')
        logger.warn(' username: {username}')
        logger.warn(' token: {token}')
        logger.warn('')
        logger.error('This is a terminal error. Exiting.')
        sys.exit(64)

    # Return the appropriate credentials.
    return {
        'username': argv_flags.github_username or config['username'],
        'token': argv_flags.github_token or config['token'],
    }

def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv.
        user_config (dict): The user configuration taken from
            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict, str): 3-tuple containing:
            - pipeline name
            - pipeline arguments
            - 'remote' or None
    """
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(user_config,
                                                             flags.googleapis)

    # In most cases, we get a language.
    if flags.language:
        pipeline_args['language'] = flags.language
    elif flags.pipeline_name != 'GapicConfigPipeline':
        logger.critical('--language is required for every pipeline except '
                        'GapicConfigPipeline.')
        sys.exit(64)

    # If this is remote execution, configure that.
    if flags.remote:
        pipeline_id = str(uuid.uuid4())
        # Use a unique random temp directory for remote execution.
        # TODO(ethanbao): Let remote artman decide the temp directory.
        pipeline_args['local_paths']['reporoot'] = '/tmp/artman/{id}'.format(
            id=pipeline_id,
        )
        pipeline_args['pipeline_id'] = pipeline_id

    # Specify the default pipeline settings - this may change if
    # BATCH is set.
    default_pipeline = 'GapicClientPipeline'

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    if flags.api:
        shared_config_name = 'common.yaml'
        if flags.language == 'ruby':
            shared_config_name = 'doc.yaml'
        googleapis = os.path.realpath(os.path.expanduser(
            pipeline_args['local_paths']['googleapis'],
        ))
        flags.config = ','.join([
            '{googleapis}/gapic/api/artman_{api}.yaml',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            api=flags.api,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )
    elif flags.batch:
        googleapis = os.path.realpath(os.path.expanduser(
            pipeline_args['local_paths']['googleapis'],
        ))
        flags.config = '{googleapis}/gapic/batch/common.yaml'.format(
            googleapis=googleapis,
        )
        default_pipeline = 'GapicClientBatchPipeline'
        if not flags.publish:
            # If the publish flag was not set by the user, set it here.
            # This prevents the user config yaml from causing a
            # publish event when batch mode is used.
            flags.publish = 'noop'
        if flags.target:
            logger.critical('--target and --batch cannot both be specified; '
                            'when using --batch, the repo must be the default '
                            'specified in the artman config yaml file (or '
                            'staging if no default is provided).')
            sys.exit(64)

    # Set the pipeline if none was specified.
    if not flags.pipeline_name:
        flags.pipeline_name = default_pipeline

    # Determine where to publish.
    pipeline_args['publish'] = support.resolve('publish', user_config, flags,
                                               default='local',
                                               )

    # Parse out the GitHub credentials iff we are publishing to GitHub.
    if pipeline_args['publish'] == 'github':
        pipeline_args['github'] = support.parse_github_credentials(
            argv_flags=flags,
            config=user_config.get('github', {}),
        )

    # Parse out the full configuration.
    config_sections = ['common']
    for config_spec in flags.config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={k.upper(): v
                       for k, v in pipeline_args['local_paths'].items()},
            language=flags.language,
        )
        pipeline_args.update(config_args)

    # Add any arbitrary keyword arguments.
    if flags.pipeline_kwargs != '{}':
        logger.warn('The use of --pipeline-kwargs is discouraged.')
        cmd_args = ast.literal_eval(flags.pipeline_kwargs)
        pipeline_args.update(cmd_args)

    # Coerce `git_repos` and `target_repo` into a single git_repo.
    if pipeline_args['publish'] in ('github', 'local') and not flags.batch:
        # Temporarily give our users a nice error if they have an older
        # googleapis checkout.
        # DEPRECATED: 2017-04-20
        # REMOVE: 2017-05-20
        if 'git_repo' in pipeline_args:
            logger.error('Your git repos are configured in your artman YAML '
                         'using an older format. Please git pull.')
            sys.exit(96)

        # Pop the git repos off of the pipeline args and select the
        # correct one.
        repos = pipeline_args.pop('git_repos')
        pipeline_args['git_repo'] = support.select_git_repo(repos, flags.target)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(pipeline_args,
                                   block_seq_indent=2,
                                   default_flow_style=False,
                                   indent=2,
                                   )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info(' {0}'.format(line))

    # Return the final arguments.
    # This includes a pipeline to run, arguments, and whether to run remotely.
    return (
        flags.pipeline_name,
        pipeline_args,
        'remote' if flags.remote else None,
    )

def parse_github_credentials(github_config, argv_flags):
    """Determine the appropriate GitHub credentials.

    If there are no valid credentials, error out with a useful message so that
    the user can get credentials.

    Args:
        github_config: The GitHub configuration pulled from the user's
            configuration file (if any).
        argv_flags (argparse.Namespace): The flags pulled from the command
            line.

    Returns:
        dict: A dictionary with 'username' and 'token' keys.
    """
    # Determine whether we have valid credentials.
    valid = all([
        github_config.username or argv_flags.github_username,
        github_config.token or argv_flags.github_token,
    ])

    # No valid credentials, give a helpful error.
    if not valid:
        logger.critical('No GitHub credentials found.')
        logger.error('Valid GitHub credentials are required if you are '
                     'publishing to GitHub (--publish github).')
        logger.warn('')
        logger.warn('In order to generate the appropriate token, perform '
                    'the following steps:')
        logger.warn(' 1. Open https://github.com/settings/tokens')
        logger.warn(' 2. Make a new access token with the "repo" scope.')
        logger.warn(' 3. Add this structure to ~/.artman/config.yaml: ')
        logger.warn('')
        logger.warn(' github:')
        logger.warn(' username: {username}')
        logger.warn(' token: {token}')
        logger.warn('')
        logger.error('This is a terminal error. Exiting.')
        sys.exit(64)

    # Return the appropriate credentials.
    return {
        'username': argv_flags.github_username or github_config.username,
        'token': argv_flags.github_token or github_config.token,
    }

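# Hypothetical precedence sketch (not part of the original module): command-line
# flags take priority over values carried in the user config. The SimpleNamespace
# stand-ins below are illustrative only.
def _example_github_precedence():
    from types import SimpleNamespace
    github_config = SimpleNamespace(username='config-user', token='config-token')
    argv_flags = SimpleNamespace(github_username='cli-user', github_token=None)
    # Returns {'username': 'cli-user', 'token': 'config-token'}.
    return parse_github_credentials(github_config, argv_flags)
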
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv.
        user_config (dict): The user configuration taken from
            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict, str): 3-tuple containing:
            - pipeline name
            - pipeline arguments
            - 'remote' or None
    """
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(
        user_config, flags.googleapis)

    # In most cases, we get a language.
    if flags.language:
        pipeline_args['language'] = flags.language
    elif flags.pipeline_name != 'GapicConfigPipeline':
        logger.critical('--language is required for every pipeline except '
                        'GapicConfigPipeline.')
        sys.exit(64)

    # If this is remote execution, configure that.
    if flags.remote:
        pipeline_id = str(uuid.uuid4())
        # Use a unique random temp directory for remote execution.
        # TODO(ethanbao): Let remote artman decide the temp directory.
        pipeline_args['local_paths']['reporoot'] = '/tmp/artman/{id}'.format(
            id=pipeline_id,
        )
        pipeline_args['pipeline_id'] = pipeline_id

    # Specify the default pipeline settings - this may change if
    # BATCH is set.
    default_pipeline = 'GapicClientPipeline'

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax.
    if flags.api:
        shared_config_name = 'common.yaml'
        if flags.language in ('ruby', 'nodejs'):
            shared_config_name = 'doc.yaml'
        googleapis = os.path.realpath(
            os.path.expanduser(pipeline_args['local_paths']['googleapis']))
        flags.config = ','.join([
            '{googleapis}/gapic/api/artman_{api}.yaml',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            api=flags.api,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )
    elif flags.batch:
        googleapis = os.path.realpath(
            os.path.expanduser(pipeline_args['local_paths']['googleapis']))
        flags.config = '{googleapis}/gapic/batch/common.yaml'.format(
            googleapis=googleapis,
        )
        default_pipeline = 'GapicClientBatchPipeline'
        if not flags.publish:
            # If the publish flag was not set by the user, set it here.
            # This prevents the user config yaml from causing a
            # publish event when batch mode is used.
            flags.publish = 'noop'
        if flags.target:
            logger.critical('--target and --batch cannot both be specified; '
                            'when using --batch, the repo must be the default '
                            'specified in the artman config yaml file (or '
                            'staging if no default is provided).')
            sys.exit(64)

    # Set the pipeline if none was specified.
    if not flags.pipeline_name:
        flags.pipeline_name = default_pipeline

    # Determine where to publish.
    pipeline_args['publish'] = support.resolve(
        'publish',
        user_config,
        flags,
        default='local',
    )

    # Parse out the GitHub credentials iff we are publishing to GitHub.
    if pipeline_args['publish'] == 'github':
        pipeline_args['github'] = support.parse_github_credentials(
            argv_flags=flags,
            config=user_config.get('github', {}),
        )

    # Parse out the full configuration.
    config_sections = ['common']
    for config_spec in flags.config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={
                k.upper(): v
                for k, v in pipeline_args['local_paths'].items()
            },
            language=flags.language,
        )
        pipeline_args.update(config_args)

    # Add any arbitrary keyword arguments.
    if flags.pipeline_kwargs != '{}':
        logger.warn('The use of --pipeline-kwargs is discouraged.')
        cmd_args = ast.literal_eval(flags.pipeline_kwargs)
        pipeline_args.update(cmd_args)

    # Coerce `git_repos` and `target_repo` into a single git_repo.
    if pipeline_args['publish'] in ('github', 'local') and not flags.batch:
        # Temporarily give our users a nice error if they have an older
        # googleapis checkout.
        # DEPRECATED: 2017-04-20
        # REMOVE: 2017-05-20
        if 'git_repo' in pipeline_args:
            logger.error('Your git repos are configured in your artman YAML '
                         'using an older format. Please git pull.')
            sys.exit(96)

        # Pop the git repos off of the pipeline args and select the
        # correct one.
        repos = pipeline_args.pop('git_repos')
        pipeline_args['git_repo'] = support.select_git_repo(
            repos, flags.target)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info(' {0}'.format(line))

    # Return the final arguments.
    # This includes a pipeline to run, arguments, and whether to run remotely.
    return (
        flags.pipeline_name,
        pipeline_args,
        'remote' if flags.remote else None,
    )
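

# Illustrative note (not part of the original module): the 3-tuple returned by
# normalize_flags is meant to be unpacked by the caller, with the last element
# selecting remote execution:
#
#     pipeline_name, pipeline_args, env = normalize_flags(flags, user_config)
#     # env is 'remote' when --remote was passed, otherwise None.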