def read_user_config(flags):
    """Read the user config from disk and return it.

    Args:
        flags (argparse.Namespace): The flags from sys.argv.

    Returns:
        dict: The user config.
    """
    # Load the user configuration if it exists and save a dictionary.
    user_config = {}
    user_config_file = os.path.realpath(os.path.expanduser(flags.user_config))
    if os.path.isfile(user_config_file):
        with io.open(user_config_file) as ucf:
            # Use safe_load: the user config is plain YAML mappings, and
            # yaml.Loader can instantiate arbitrary Python objects from
            # untrusted file content.
            user_config = yaml.safe_load(ucf.read()) or {}

    # Sanity check: Is there a configuration? If not, abort.
    if not user_config:
        setup_logging(INFO)
        logger.critical('No user configuration found.')
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning('This is probably your first time running Artman.')
        logger.warning('Run `configure-artman` to get yourself set up.')
        sys.exit(64)

    # Done; return the user config.
    return user_config
def select_git_repo(git_repos, target_repo):
    """Choose which Git repo to use, honoring YAML config and CLI input.

    Args:
        git_repos (dict): Information about git repositories.
        target_repo (str): The user-selected target repository. May be None.

    Returns:
        dict: The selected GitHub repo.
    """
    # An explicitly requested repo wins; it just has to be configured.
    if target_repo:
        selected = git_repos.get(target_repo)
        if not selected:
            logger.critical('The requested target repo is not defined '
                            'for that API and language.')
            sys.exit(32)
        return selected

    # Nothing was requested: prefer a repo flagged as the default, and
    # fall back to the "staging" repo when none is flagged.
    flagged_default = next(
        (repo for repo in git_repos.values() if repo.get('default', False)),
        None,
    )
    if flagged_default is not None:
        return flagged_default
    return git_repos['staging']
def select_git_repo(git_repos, target_repo):
    """Select the appropriate Git repo based on YAML config and CLI arguments.

    Args:
        git_repos (dict): Information about git repositories.
        target_repo (str): The user-selected target repository. May be None.

    Returns:
        dict: The selected GitHub repo.
    """
    if not target_repo:
        # No repo requested explicitly: scan for one marked as the
        # default, and use "staging" when none is marked.
        for candidate in git_repos.values():
            if candidate.get('default', False):
                return candidate
        return git_repos['staging']

    # The user asked for a specific repo; it must exist in the config.
    chosen = git_repos.get(target_repo)
    if not chosen:
        logger.critical('The requested target repo is not defined '
                        'for that API and language.')
        sys.exit(32)
    return chosen
def read_user_config(flags):
    """Load and return the user's on-disk configuration.

    Args:
        flags (argparse.Namespace): The flags from sys.argv.

    Returns:
        dict: The user config.
    """
    config_path = os.path.realpath(os.path.expanduser(flags.user_config))

    # Parse the YAML file, defaulting to an empty mapping when the file
    # is absent or parses to nothing.
    loaded = {}
    if os.path.isfile(config_path):
        with io.open(config_path) as config_file:
            loaded = yaml.load(config_file.read(), Loader=yaml.Loader) or {}

    # An empty configuration is a terminal condition; tell the user how
    # to bootstrap one and exit.
    if not loaded:
        setup_logging(INFO)
        logger.critical('No user configuration found.')
        logger.warn('This is probably your first time running Artman.')
        logger.warn('Run `configure-artman` to get yourself set up.')
        sys.exit(64)

    return loaded
def _parse_args(*args):
    """Parse command-line arguments, requiring --queue-name.

    Exits with status 1 when --queue-name is missing.
    """
    flags = _CreateArgumentParser().parse_args(args=args)
    if flags.queue_name:
        return flags
    # --queue-name is mandatory; report loudly and bail out without it.
    setup_logging(INFO)
    logger.critical('Required --queue-name flag not specified')
    sys.exit(1)
def _parse_args(*args):
    """Run the module's argument parser over *args* and return the result.

    The --queue-name flag is required; its absence is a fatal error.
    """
    parser = _CreateArgumentParser()
    parsed = parser.parse_args(args=args)
    if not parsed.queue_name:
        setup_logging(INFO)
        logger.critical('Required --queue-name flag not specified')
        sys.exit(1)
    return parsed
def _print_log(pipeline_id):
    """Fetch and print the cloud log for a failed remote pipeline.

    Cloud logging entries take a while to become available after a
    failure, so this warns the user and sleeps 30 seconds first.
    """
    logger.critical(
        'The remote pipeline execution failed. It will wait for 30 '
        'seconds before fetching the log for remote pipeline execution.',
    )
    time.sleep(30)
    remote_logger = logging.Client().logger(pipeline_id)
    entries, _token = remote_logger.list_entries()
    for remote_entry in entries:
        logger.error(remote_entry.payload)
    # Tell the user how to re-fetch the log later on their own.
    logger.info(
        'You can always run the following command to fetch the log entry:\n'
        ' gcloud beta logging read "logName=projects/vkit-pipeline/logs/%s"'
        % pipeline_id,
    )
def parse_github_credentials(config, argv_flags):
    """Determine the appropriate GitHub credentials.

    If there are no valid credentials, error out with a useful message so
    that the user can get credentials.

    Args:
        config (dict): The user github configuration pulled from the
            user's configuration file (if any).
        argv_flags (argparse.Namespace): The flags pulled from the command
            line.

    Returns:
        dict: A dictionary with 'username' and 'token' keys.
    """
    # Resolve each credential up front; command-line flags win over the
    # configuration file. Checking resolved *values* (rather than mere
    # key presence in the config) also rejects empty or None entries in
    # the YAML file.
    username = argv_flags.github_username or config.get('username')
    token = argv_flags.github_token or config.get('token')

    # No valid credentials, give a helpful error.
    if not (username and token):
        logger.critical('No GitHub credentials found.')
        logger.error('Valid GitHub credentials are required if you are '
                     'publishing to GitHub (--publish github).')
        # logger.warn is deprecated; use logger.warning.
        logger.warning('')
        logger.warning('In order to generate the appropriate token, perform '
                       'the following steps:')
        logger.warning(' 1. Open https://github.com/settings/tokens')
        logger.warning(' 2. Make a new access token with the "repo" scope.')
        logger.warning(' 3. Add this structure to ~/.artman/config.yaml: ')
        logger.warning('')
        logger.warning(' github:')
        logger.warning(' username: {username}')
        logger.warning(' token: {token}')
        logger.warning('')
        logger.error('This is a terminal error. Exiting.')
        sys.exit(64)

    # Return the appropriate credentials.
    return {
        'username': username,
        'token': token,
    }
def parse_github_credentials(github_config, argv_flags):
    """Work out which GitHub credentials to use.

    Command-line flags take precedence over the user configuration.
    When neither source supplies both a username and a token, print
    detailed setup instructions and exit.

    Args:
        github_config: The github section of the user configuration.
        argv_flags (argparse.Namespace): The flags pulled from the command
            line.

    Returns:
        dict: A dictionary with 'username' and 'token' keys.
    """
    # A credential is known if either source supplies it.
    username_known = bool(github_config.username or argv_flags.github_username)
    token_known = bool(github_config.token or argv_flags.github_token)

    # Without both pieces, publishing to GitHub cannot work; walk the
    # user through generating a token, then bail.
    if not (username_known and token_known):
        logger.critical('No GitHub credentials found.')
        logger.error('Valid GitHub credentials are required if you are '
                     'publishing to GitHub (--publish github).')
        logger.warn('')
        logger.warn('In order to generate the appropriate token, perform '
                    'the following steps:')
        logger.warn(' 1. Open https://github.com/settings/tokens')
        logger.warn(' 2. Make a new access token with the "repo" scope.')
        logger.warn(' 3. Add this structure to ~/.artman/config.yaml: ')
        logger.warn('')
        logger.warn(' github:')
        logger.warn(' username: {username}')
        logger.warn(' token: {token}')
        logger.warn('')
        logger.error('This is a terminal error. Exiting.')
        sys.exit(64)

    # Flags win; fall back to the configuration for anything unset.
    return {
        'username': argv_flags.github_username or github_config.username,
        'token': argv_flags.github_token or github_config.token,
    }
def _print_log(pipeline_id):
    """Pull the cloud log entries for a failed remote pipeline and print them."""
    # The log takes a while to appear after a failure, so warn the user
    # and wait before fetching anything.
    logger.critical(
        'The remote pipeline execution failed. It will wait for 30 '
        'seconds before fetching the log for remote pipeline execution.',
    )
    time.sleep(30)

    log_client = logging.Client()
    entries, token = log_client.logger(pipeline_id).list_entries()
    for entry in entries:
        logger.error(entry.payload)

    # Point the user at the gcloud command that re-fetches this log.
    logger.info(
        'You can always run the following command to fetch the log entry:\n'
        ' gcloud beta logging read "logName=projects/vkit-pipeline/logs/%s"'
        % pipeline_id,
    )
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict, str): 3-tuple containing:
            - pipeline name
            - pipeline arguments
            - 'remote' or None
    """
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(user_config,
                                                            flags.googleapis)

    # In most cases, we get a language.
    # GapicConfigPipeline is the one pipeline that is language-agnostic.
    if flags.language:
        pipeline_args['language'] = flags.language
    elif flags.pipeline_name != 'GapicConfigPipeline':
        logger.critical('--language is required for every pipeline except '
                        'GapicConfigPipeline.')
        sys.exit(64)

    # If this is remote execution, configure that.
    if flags.remote:
        pipeline_id = str(uuid.uuid4())
        # Use a unique random temp directory for remote execution.
        # TODO(ethanbao): Let remote artman decide the temp directory.
        pipeline_args['local_paths']['reporoot'] = '/tmp/artman/{id}'.format(
            id=pipeline_id,
        )
        pipeline_args['pipeline_id'] = pipeline_id

    # Specify the default pipeline settings - this may change if
    # BATCH is set
    default_pipeline = 'GapicClientPipeline'

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax. Note that this *mutates* flags.config in place.
    if flags.api:
        shared_config_name = 'common.yaml'
        # NOTE(review): Ruby appears to use a distinct shared config;
        # confirm why only Ruby is special-cased here.
        if flags.language == 'ruby':
            shared_config_name = 'doc.yaml'
        googleapis = os.path.realpath(os.path.expanduser(
            pipeline_args['local_paths']['googleapis'],
        ))
        flags.config = ','.join([
            '{googleapis}/gapic/api/artman_{api}.yaml',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            api=flags.api,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )
    elif flags.batch:
        googleapis = os.path.realpath(os.path.expanduser(
            pipeline_args['local_paths']['googleapis'],
        ))
        flags.config = '{googleapis}/gapic/batch/common.yaml'.format(
            googleapis=googleapis,
        )
        default_pipeline = 'GapicClientBatchPipeline'
        if not flags.publish:
            # If publish flag was not set by the user, set it here.
            # This prevents the user config yaml from causing a
            # publish event when batch mode is used.
            flags.publish = 'noop'
        if flags.target:
            logger.critical('--target and --batch cannot both be specified; '
                            'when using --batch, the repo must be the default '
                            'specified in the artman config yaml file (or '
                            'staging if no default is provided).')
            sys.exit(64)

    # Set the pipeline if none was specified
    if not flags.pipeline_name:
        flags.pipeline_name = default_pipeline

    # Determine where to publish.
    pipeline_args['publish'] = support.resolve('publish', user_config, flags,
        default='local',
    )

    # Parse out the GitHub credentials iff we are publishing to GitHub.
    if pipeline_args['publish'] == 'github':
        pipeline_args['github'] = support.parse_github_credentials(
            argv_flags=flags,
            config=user_config.get('github', {}),
        )

    # Parse out the full configuration.
    # Each comma-separated config spec is loaded and merged into
    # pipeline_args; later specs override earlier ones on key clashes.
    config_sections = ['common']
    for config_spec in flags.config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={k.upper(): v
                       for k, v in pipeline_args['local_paths'].items()},
            language=flags.language,
        )
        pipeline_args.update(config_args)

    # Add any arbitrary keyword arguments.
    # literal_eval only accepts Python literals, so this cannot execute
    # arbitrary code, but it still bypasses normal flag validation.
    if flags.pipeline_kwargs != '{}':
        logger.warn('The use of --pipeline-kwargs is discouraged.')
        cmd_args = ast.literal_eval(flags.pipeline_kwargs)
        pipeline_args.update(cmd_args)

    # Coerce `git_repos` and `target_repo` into a single git_repo.
    if pipeline_args['publish'] in ('github', 'local') and not flags.batch:
        # Temporarily give our users a nice error if they have an older
        # googleapis checkout.
        # DEPRECATED: 2017-04-20
        # REMOVE: 2017-05-20
        if 'git_repo' in pipeline_args:
            logger.error('Your git repos are configured in your artman YAML '
                         'using a older format. Please git pull.')
            sys.exit(96)

        # Pop the git repos off of the pipeline args and select the
        # correct one.
        repos = pipeline_args.pop('git_repos')
        pipeline_args['git_repo'] = support.select_git_repo(repos,
                                                            flags.target)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging. Any line containing 'token' is redacted so
    # credentials do not leak into logs.
    pipeline_args_repr = yaml.dump(pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info(' {0}'.format(line))

    # Return the final arguments.
    # This includes a pipeline to run, arguments, and whether to run remotely.
    return (
        flags.pipeline_name,
        pipeline_args,
        'remote' if flags.remote else None,
    )
def normalize_flags(flags, user_config):
    """Combine the argparse flags and user configuration together.

    Args:
        flags (argparse.Namespace): The flags parsed from sys.argv
        user_config (dict): The user configuration taken from
            ~/.artman/config.yaml.

    Returns:
        tuple (str, dict, str): 3-tuple containing:
            - pipeline name
            - pipeline arguments
            - 'remote' or None
    """
    pipeline_args = {}

    # Determine logging verbosity and then set up logging.
    verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
    setup_logging(verbosity)

    # Save local paths, if applicable.
    # This allows the user to override the path to api-client-staging or
    # toolkit on his or her machine.
    pipeline_args['local_paths'] = support.parse_local_paths(
        user_config, flags.googleapis)

    # In most cases, we get a language.
    # GapicConfigPipeline is the one pipeline that is language-agnostic.
    if flags.language:
        pipeline_args['language'] = flags.language
    elif flags.pipeline_name != 'GapicConfigPipeline':
        logger.critical('--language is required for every pipeline except '
                        'GapicConfigPipeline.')
        sys.exit(64)

    # If this is remote execution, configure that.
    if flags.remote:
        pipeline_id = str(uuid.uuid4())
        # Use a unique random temp directory for remote execution.
        # TODO(ethanbao): Let remote artman decide the temp directory.
        pipeline_args['local_paths']['reporoot'] = '/tmp/artman/{id}'.format(
            id=pipeline_id,
        )
        pipeline_args['pipeline_id'] = pipeline_id

    # Specify the default pipeline settings - this may change if
    # BATCH is set
    default_pipeline = 'GapicClientPipeline'

    # If we were given just an API or BATCH, then expand it into the --config
    # syntax. Note that this *mutates* flags.config in place.
    if flags.api:
        shared_config_name = 'common.yaml'
        # Ruby and Node.js use a distinct shared config file.
        if flags.language in (
                'ruby',
                'nodejs',
        ):
            shared_config_name = 'doc.yaml'
        googleapis = os.path.realpath(
            os.path.expanduser(pipeline_args['local_paths']['googleapis'], ))
        flags.config = ','.join([
            '{googleapis}/gapic/api/artman_{api}.yaml',
            '{googleapis}/gapic/lang/{shared_config_name}',
        ]).format(
            api=flags.api,
            googleapis=googleapis,
            shared_config_name=shared_config_name,
        )
    elif flags.batch:
        googleapis = os.path.realpath(
            os.path.expanduser(pipeline_args['local_paths']['googleapis'], ))
        flags.config = '{googleapis}/gapic/batch/common.yaml'.format(
            googleapis=googleapis,
        )
        default_pipeline = 'GapicClientBatchPipeline'
        if not flags.publish:
            # If publish flag was not set by the user, set it here.
            # This prevents the user config yaml from causing a
            # publish event when batch mode is used.
            flags.publish = 'noop'
        if flags.target:
            logger.critical('--target and --batch cannot both be specified; '
                            'when using --batch, the repo must be the default '
                            'specified in the artman config yaml file (or '
                            'staging if no default is provided).')
            sys.exit(64)

    # Set the pipeline if none was specified
    if not flags.pipeline_name:
        flags.pipeline_name = default_pipeline

    # Determine where to publish.
    pipeline_args['publish'] = support.resolve(
        'publish',
        user_config,
        flags,
        default='local',
    )

    # Parse out the GitHub credentials iff we are publishing to GitHub.
    if pipeline_args['publish'] == 'github':
        pipeline_args['github'] = support.parse_github_credentials(
            argv_flags=flags,
            config=user_config.get('github', {}),
        )

    # Parse out the full configuration.
    # Each comma-separated config spec is loaded and merged into
    # pipeline_args; later specs override earlier ones on key clashes.
    config_sections = ['common']
    for config_spec in flags.config.split(','):
        config_args = config_util.load_config_spec(
            config_spec=config_spec,
            config_sections=config_sections,
            repl_vars={
                k.upper(): v
                for k, v in pipeline_args['local_paths'].items()
            },
            language=flags.language,
        )
        pipeline_args.update(config_args)

    # Add any arbitrary keyword arguments.
    # literal_eval only accepts Python literals, so this cannot execute
    # arbitrary code, but it still bypasses normal flag validation.
    if flags.pipeline_kwargs != '{}':
        logger.warn('The use of --pipeline-kwargs is discouraged.')
        cmd_args = ast.literal_eval(flags.pipeline_kwargs)
        pipeline_args.update(cmd_args)

    # Coerce `git_repos` and `target_repo` into a single git_repo.
    if pipeline_args['publish'] in ('github', 'local') and not flags.batch:
        # Temporarily give our users a nice error if they have an older
        # googleapis checkout.
        # DEPRECATED: 2017-04-20
        # REMOVE: 2017-05-20
        if 'git_repo' in pipeline_args:
            logger.error('Your git repos are configured in your artman YAML '
                         'using a older format. Please git pull.')
            sys.exit(96)

        # Pop the git repos off of the pipeline args and select the
        # correct one.
        repos = pipeline_args.pop('git_repos')
        pipeline_args['git_repo'] = support.select_git_repo(
            repos, flags.target)

    # Print out the final arguments to stdout, to help the user with
    # possible debugging. Any line containing 'token' is redacted so
    # credentials do not leak into logs.
    pipeline_args_repr = yaml.dump(
        pipeline_args,
        block_seq_indent=2,
        default_flow_style=False,
        indent=2,
    )
    logger.info('Final args:')
    for line in pipeline_args_repr.split('\n'):
        if 'token' in line:
            index = line.index(':')
            line = line[:index + 2] + '<< REDACTED >>'
        logger.info(' {0}'.format(line))

    # Return the final arguments.
    # This includes a pipeline to run, arguments, and whether to run remotely.
    return (
        flags.pipeline_name,
        pipeline_args,
        'remote' if flags.remote else None,
    )