def _make_option_parser():
    usage = '%prog [options] <time-untouched> <URIs>'
    description = (
        'Delete all files in a given URI that are older than a specified'
        ' time.\n\nThe time parameter defines the threshold for removing'
        ' files. If the file has not been accessed for *time*, the file is'
        ' removed. The time argument is a number with an optional'
        ' single-character suffix specifying the units: m for minutes, h for'
        ' hours, d for days. If no suffix is specified, time is in hours.')

    option_parser = OptionParser(usage=usage, description=description)
    option_parser.add_option(
        '-t', '--test', dest='test', default=False, action='store_true',
        help="Don't actually delete any files; just log that we would")

    _add_basic_options(option_parser)
    _add_runner_options(
        option_parser,
        set(['region', 's3_endpoint']),
    )

    _alphabetize_options(option_parser)
    return option_parser

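# The description above defines the <time-untouched> syntax: a number with an
# optional m/h/d suffix, defaulting to hours. A minimal, self-contained sketch
# of parsing such a value; _parse_time_spec is a hypothetical helper, not part
# of this module:

from datetime import timedelta

def _parse_time_spec(value):
    """Parse e.g. '30m', '12h', '7d', or '5' (no suffix means hours)."""
    units = {'m': 'minutes', 'h': 'hours', 'd': 'days'}
    if value and value[-1] in units:
        return timedelta(**{units[value[-1]]: float(value[:-1])})
    return timedelta(hours=float(value))
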
def configure_options(self): """Define arguments for this script. Called from :py:meth:`__init__()`. Re-define to define custom command-line arguments or pass through existing ones:: def configure_options(self): super(MRYourJob, self).configure_options self.add_passthrough_option(...) self.add_file_option(...) self.pass_through_option(...) ... """ self.option_parser.add_option( '-h', '--help', dest='help', action='store_true', default=False, help='show this message and exit') self.option_parser.add_option( '--deprecated', dest='deprecated', action='store_true', default=False, help='include help for deprecated options') _add_basic_options(self.option_parser) _add_job_options(self.option_parser) _add_runner_options(self.option_parser, _pick_runner_opts())
def _make_option_parser():
    usage = '%prog [options]'
    description = ('Terminate idle EMR clusters that meet the criteria'
                   ' passed in on the command line (or, by default,'
                   ' clusters that have been idle for one hour).')

    option_parser = OptionParser(usage=usage, description=description)
    option_parser.add_option(
        '--max-hours-idle', dest='max_hours_idle',
        default=None, type='float',
        help=('Max number of hours a cluster can go without bootstrapping,'
              ' running a step, or having a new step created. This will fire'
              ' even if there are pending steps which EMR has failed to'
              ' start. Make sure you set this higher than the amount of time'
              ' your jobs can take to start instances and bootstrap.'))
    option_parser.add_option(
        '--max-mins-locked', dest='max_mins_locked',
        default=_DEFAULT_MAX_MINUTES_LOCKED, type='float',
        help='Max number of minutes a cluster can be locked while idle.')
    option_parser.add_option(
        '--mins-to-end-of-hour', dest='mins_to_end_of_hour',
        default=None, type='float',
        help=('Terminate clusters that are within this many minutes of'
              ' the end of a full hour since the job started running'
              ' AND have no pending steps.'))
    option_parser.add_option(
        '--unpooled-only', dest='unpooled_only', action='store_true',
        default=False,
        help='Only terminate un-pooled clusters')
    option_parser.add_option(
        '--pooled-only', dest='pooled_only', action='store_true',
        default=False,
        help='Only terminate pooled clusters')
    option_parser.add_option(
        '--pool-name', dest='pool_name', default=None,
        help='Only terminate clusters in the given named pool.')
    option_parser.add_option(
        '--dry-run', dest='dry_run', default=False, action='store_true',
        help="Don't actually kill idle jobs; just log that we would")

    _add_basic_options(option_parser)
    _add_runner_options(
        option_parser,
        _filter_by_role(EMRJobRunner.OPT_NAMES, 'connect'))

    _alphabetize_options(option_parser)
    return option_parser

def _deprecated_option_group(self, opt_names, title):
    if not getattr(self, '_warned_about_opt_groups', None):
        log.warning('*_opt_group attributes are deprecated and going away'
                    ' in v0.6.0')
        self._warned_about_opt_groups = True

    opt_group = OptionGroup(self._dummy_option_parser, title)
    _add_runner_options(opt_group, opt_names)

    # return the group so the deprecated *_opt_group attribute resolves
    # to something usable
    return opt_group

def _make_option_parser():
    usage = '%prog [options]'
    description = ('Terminate idle EMR clusters that meet the criteria'
                   ' passed in on the command line (or, by default,'
                   ' clusters that have been idle for one hour).')

    option_parser = OptionParser(usage=usage, description=description)
    option_parser.add_option(
        '--max-hours-idle', dest='max_hours_idle',
        default=None, type='float',
        help=('Max number of hours a cluster can go without bootstrapping,'
              ' running a step, or having a new step created. This will fire'
              ' even if there are pending steps which EMR has failed to'
              ' start. Make sure you set this higher than the amount of time'
              ' your jobs can take to start instances and bootstrap.'))
    option_parser.add_option(
        '--max-mins-locked', dest='max_mins_locked',
        default=_DEFAULT_MAX_MINUTES_LOCKED, type='float',
        help='Max number of minutes a cluster can be locked while idle.')
    option_parser.add_option(
        '--mins-to-end-of-hour', dest='mins_to_end_of_hour',
        default=None, type='float',
        help=('Terminate clusters that are within this many minutes of'
              ' the end of a full hour since the job started running'
              ' AND have no pending steps.'))
    option_parser.add_option(
        '--unpooled-only', dest='unpooled_only', action='store_true',
        default=False,
        help='Only terminate un-pooled clusters')
    option_parser.add_option(
        '--pooled-only', dest='pooled_only', action='store_true',
        default=False,
        help='Only terminate pooled clusters')
    option_parser.add_option(
        '--pool-name', dest='pool_name', default=None,
        help='Only terminate clusters in the given named pool.')
    option_parser.add_option(
        '--dry-run', dest='dry_run', default=False, action='store_true',
        help="Don't actually kill idle jobs; just log that we would")

    _add_basic_options(option_parser)
    _add_runner_options(
        option_parser,
        _pick_runner_opts('emr', 'connect'))

    _alphabetize_options(option_parser)
    return option_parser

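# Hedged usage sketch for the parser above; the argument values are
# illustrative, and _example_parse_args is not part of this module:

def _example_parse_args():
    option_parser = _make_option_parser()
    options, args = option_parser.parse_args(
        ['--max-hours-idle', '1', '--pool-name', 'reporting', '--dry-run'])
    # optparse converts type='float' options and sets store_true flags
    assert options.max_hours_idle == 1.0
    assert options.pool_name == 'reporting'
    assert options.dry_run is True
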
def _make_option_parser():
    usage = '%prog [options]'
    description = (
        'Create a persistent EMR cluster to run jobs in, and print its ID to'
        ' stdout. WARNING: Do not run'
        ' this without mrjob terminate-idle-clusters in your'
        ' crontab; clusters left idle can quickly become expensive!')

    option_parser = OptionParser(usage=usage, description=description)

    _add_basic_options(option_parser)
    _add_runner_options(
        option_parser,
        (_pick_runner_opts('emr', 'connect') |
         _pick_runner_opts('emr', 'launch')))

    _alphabetize_options(option_parser)
    return option_parser

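# To make the WARNING above concrete: an illustrative crontab entry that
# sweeps idle clusters every 30 minutes (the flag value is an example only):
#
#   */30 * * * * mrjob terminate-idle-clusters --max-hours-idle 1
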
def _make_option_parser():
    usage = '%prog [options]'
    description = 'Print a giant report on EMR usage.'

    option_parser = OptionParser(usage=usage, description=description)
    option_parser.add_option(
        '--max-days-ago', dest='max_days_ago', type='float', default=None,
        help=('Max number of days ago to look at jobs. By default, we go back'
              ' as far as EMR supports (currently about 2 months)'))

    _add_basic_options(option_parser)
    _add_runner_options(
        option_parser,
        _pick_runner_opts('emr', 'connect'))

    _alphabetize_options(option_parser)
    return option_parser

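# A hedged sketch of how --max-days-ago might become a datetime cutoff when
# scanning job history; _cutoff_time is a hypothetical helper:

from datetime import datetime, timedelta

def _cutoff_time(max_days_ago):
    """Return the earliest time to consider, or None for no limit."""
    if max_days_ago is None:
        return None
    return datetime.utcnow() - timedelta(days=max_days_ago)
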
def _make_option_parser():
    usage = '%prog [options] cluster-id'
    description = 'Terminate an existing EMR cluster.'

    option_parser = OptionParser(usage=usage, description=description)
    option_parser.add_option(
        '-t', '--test', dest='test', default=False, action='store_true',
        help="Don't actually terminate the cluster; just log that we would")

    _add_basic_options(option_parser)
    _add_runner_options(
        option_parser,
        _pick_runner_opts('emr', 'connect'))

    _alphabetize_options(option_parser)
    return option_parser

def main(cl_args=None):
    usage = 'usage: %prog CLUSTER_ID [options] "command string"'
    description = ('Run a command on the master and all slaves of an EMR'
                   ' cluster. Store stdout/stderr for results in OUTPUT_DIR.')

    option_parser = OptionParser(usage=usage, description=description)
    option_parser.add_option('-o', '--output-dir', dest='output_dir',
                             default=None,
                             help="Specify an output directory (default:"
                             " CLUSTER_ID)")

    _add_basic_options(option_parser)
    _add_runner_options(
        option_parser,
        _pick_runner_opts('emr', 'connect') |
        set(['ssh_bin', 'ec2_key_pair_file']))

    _alphabetize_options(option_parser)

    options, args = option_parser.parse_args(cl_args)

    MRJob.set_up_logging(quiet=options.quiet, verbose=options.verbose)

    runner_kwargs = options.__dict__.copy()
    for unused_arg in ('output_dir', 'quiet', 'verbose'):
        del runner_kwargs[unused_arg]

    if len(args) < 2:
        option_parser.print_help()
        sys.exit(1)

    cluster_id, cmd_string = args[:2]
    cmd_args = shlex_split(cmd_string)

    output_dir = os.path.abspath(options.output_dir or cluster_id)

    with EMRJobRunner(cluster_id=cluster_id, **runner_kwargs) as runner:
        _run_on_all_nodes(runner, output_dir, cmd_args)

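# Hedged usage sketch; the cluster ID and output dir below are illustrative,
# and running this for real connects to the EMR cluster over SSH:
#
#   main(['j-2EXAMPLE0EXAMPLE', '-o', 'uptime-results', 'uptime'])
#
# optparse intermixes options and positionals, so args[:2] above ends up as
# ('j-2EXAMPLE0EXAMPLE', 'uptime'), and each node's stdout/stderr lands
# under ./uptime-results/.
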
def _make_option_parser():
    usage = '%prog [options]'
    description = ('Report jobs running for more than a certain number of'
                   ' hours (by default, %.1f). This can help catch buggy jobs'
                   ' and Hadoop/EMR operational issues.' % DEFAULT_MIN_HOURS)

    option_parser = OptionParser(usage=usage, description=description)
    option_parser.add_option(
        '--min-hours', dest='min_hours', type='float',
        default=DEFAULT_MIN_HOURS,
        help=('Minimum number of hours a job can run before we report it.'
              ' Default: %default'))

    _add_basic_options(option_parser)
    _add_runner_options(
        option_parser,
        _pick_runner_opts('emr', 'connect'))

    _alphabetize_options(option_parser)
    return option_parser

def configure_options(self): """Define arguments for this script. Called from :py:meth:`__init__()`. Re-define to define custom command-line arguments or pass through existing ones:: def configure_options(self): super(MRYourJob, self).configure_options self.add_passthrough_option(...) self.add_file_option(...) self.pass_through_option(...) ... """ self.option_parser.add_option( '--help', dest='help_main', action='store_true', default=False, help='show this message and exit') self.option_parser.add_option( '--help-dataproc', dest='help_dataproc', action='store_true', default=False, help='show Dataproc-related options') self.option_parser.add_option( '--help-emr', dest='help_emr', action='store_true', default=False, help='show EMR-related options') self.option_parser.add_option( '--help-hadoop', dest='help_hadoop', action='store_true', default=False, help='show Hadoop-related options') self.option_parser.add_option( '--help-local', dest='help_local', action='store_true', default=False, help='show local/inline runner-related options') self.option_parser.add_option( '--help-runner', dest='help_runner', action='store_true', default=False, help='show runner-related options') # protocol stuff self.proto_opt_group = OptionGroup( self.option_parser, 'Protocols') self.option_parser.add_option_group(self.proto_opt_group) _add_runner_options( self.proto_opt_group, set(['strict_protocols'])) # options for running the job (any runner) self.runner_opt_group = OptionGroup( self.option_parser, 'Running the entire job') self.option_parser.add_option_group(self.runner_opt_group) _add_basic_options(self.runner_opt_group) _add_job_options(self.runner_opt_group) _add_runner_options( self.runner_opt_group, _pick_runner_opts('base') - set(['strict_protocols'])) # options for inline/local runners self.local_opt_group = OptionGroup( self.option_parser, 'Running locally (these apply when you set -r inline or -r local)') self.option_parser.add_option_group(self.local_opt_group) _add_runner_options( self.local_opt_group, _pick_runner_opts('local') - _pick_runner_opts('base')) # options common to Hadoop and EMR self.hadoop_emr_opt_group = OptionGroup( self.option_parser, 'Running on Hadoop or EMR (these apply when you set -r hadoop or' ' -r emr)') self.option_parser.add_option_group(self.hadoop_emr_opt_group) _add_runner_options( self.hadoop_emr_opt_group, ((_pick_runner_opts('emr') & _pick_runner_opts('hadoop')) - _pick_runner_opts('base'))) # options for running the job on Hadoop self.hadoop_opt_group = OptionGroup( self.option_parser, 'Running on Hadoop (these apply when you set -r hadoop)') self.option_parser.add_option_group(self.hadoop_opt_group) _add_runner_options( self.hadoop_opt_group, (_pick_runner_opts('hadoop') - _pick_runner_opts('emr') - _pick_runner_opts('base'))) # options for running the job on Dataproc or EMR self.dataproc_emr_opt_group = OptionGroup( self.option_parser, 'Running on Dataproc or EMR (these apply when you set -r dataproc' ' or -r emr)') self.option_parser.add_option_group(self.dataproc_emr_opt_group) _add_runner_options( self.dataproc_emr_opt_group, ((_pick_runner_opts('dataproc') & _pick_runner_opts('emr')) - _pick_runner_opts('base'))) # options for running the job on Dataproc self.dataproc_opt_group = OptionGroup( self.option_parser, 'Running on Dataproc (these apply when you set -r dataproc)') self.option_parser.add_option_group(self.dataproc_opt_group) _add_runner_options( self.dataproc_opt_group, (_pick_runner_opts('dataproc') - _pick_runner_opts('emr') - 
_pick_runner_opts('base'))) # options for running the job on EMR self.emr_opt_group = OptionGroup( self.option_parser, 'Running on EMR (these apply when you set -r emr)') self.option_parser.add_option_group(self.emr_opt_group) _add_runner_options( self.emr_opt_group, (_pick_runner_opts('emr') - _pick_runner_opts('hadoop') - _pick_runner_opts('dataproc') - _pick_runner_opts('base')))
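# A toy illustration (option names invented, not mrjob's real ones) of the
# set algebra used above to route each runner option into exactly one
# help group:

_BASE = {'conf_paths', 'owner'}
_HADOOP = _BASE | {'hadoop_bin'}
_EMR = _BASE | {'ec2_key_pair', 'hadoop_bin'}  # shares 'hadoop_bin'

assert (_EMR & _HADOOP) - _BASE == {'hadoop_bin'}   # Hadoop-or-EMR group
assert _EMR - _HADOOP - _BASE == {'ec2_key_pair'}   # EMR-only group
assert _HADOOP - _EMR - _BASE == set()              # Hadoop-only group
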
def configure_options(self): """Define arguments for this script. Called from :py:meth:`__init__()`. Re-define to define custom command-line arguments or pass through existing ones:: def configure_options(self): super(MRYourJob, self).configure_options self.add_passthrough_option(...) self.add_file_option(...) self.pass_through_option(...) ... """ self.option_parser.add_option('--help', dest='help_main', action='store_true', default=False, help='show this message and exit') self.option_parser.add_option('--help-dataproc', dest='help_dataproc', action='store_true', default=False, help='show Dataproc-related options') self.option_parser.add_option('--help-emr', dest='help_emr', action='store_true', default=False, help='show EMR-related options') self.option_parser.add_option('--help-hadoop', dest='help_hadoop', action='store_true', default=False, help='show Hadoop-related options') self.option_parser.add_option( '--help-local', dest='help_local', action='store_true', default=False, help='show local/inline runner-related options') self.option_parser.add_option('--help-runner', dest='help_runner', action='store_true', default=False, help='show runner-related options') # protocol stuff self.proto_opt_group = OptionGroup(self.option_parser, 'Protocols') self.option_parser.add_option_group(self.proto_opt_group) _add_runner_options(self.proto_opt_group, set(['strict_protocols'])) # options for running the job (any runner) self.runner_opt_group = OptionGroup(self.option_parser, 'Running the entire job') self.option_parser.add_option_group(self.runner_opt_group) _add_basic_options(self.runner_opt_group) _add_job_options(self.runner_opt_group) _add_runner_options( self.runner_opt_group, _pick_runner_opts('base') - set(['strict_protocols'])) # options for inline/local runners self.local_opt_group = OptionGroup( self.option_parser, 'Running locally (these apply when you set -r inline or -r local)') self.option_parser.add_option_group(self.local_opt_group) _add_runner_options( self.local_opt_group, _pick_runner_opts('local') - _pick_runner_opts('base')) # options common to Hadoop and EMR self.hadoop_emr_opt_group = OptionGroup( self.option_parser, 'Running on Hadoop or EMR (these apply when you set -r hadoop or' ' -r emr)') self.option_parser.add_option_group(self.hadoop_emr_opt_group) _add_runner_options( self.hadoop_emr_opt_group, ((_pick_runner_opts('emr') & _pick_runner_opts('hadoop')) - _pick_runner_opts('base'))) # options for running the job on Hadoop self.hadoop_opt_group = OptionGroup( self.option_parser, 'Running on Hadoop (these apply when you set -r hadoop)') self.option_parser.add_option_group(self.hadoop_opt_group) _add_runner_options( self.hadoop_opt_group, (_pick_runner_opts('hadoop') - _pick_runner_opts('emr') - _pick_runner_opts('base'))) # options for running the job on Dataproc or EMR self.dataproc_emr_opt_group = OptionGroup( self.option_parser, 'Running on Dataproc or EMR (these apply when you set -r dataproc' ' or -r emr)') self.option_parser.add_option_group(self.dataproc_emr_opt_group) _add_runner_options( self.dataproc_emr_opt_group, ((_pick_runner_opts('dataproc') & _pick_runner_opts('emr')) - _pick_runner_opts('base'))) # options for running the job on Dataproc self.dataproc_opt_group = OptionGroup( self.option_parser, 'Running on Dataproc (these apply when you set -r dataproc)') self.option_parser.add_option_group(self.dataproc_opt_group) _add_runner_options( self.dataproc_opt_group, (_pick_runner_opts('dataproc') - _pick_runner_opts('emr') - _pick_runner_opts('base'))) # 
options for running the job on EMR self.emr_opt_group = OptionGroup( self.option_parser, 'Running on EMR (these apply when you set -r emr)') self.option_parser.add_option_group(self.emr_opt_group) _add_runner_options( self.emr_opt_group, (_pick_runner_opts('emr') - _pick_runner_opts('hadoop') - _pick_runner_opts('dataproc') - _pick_runner_opts('base')))