def main(cl_args=None):
    """Terminate the EMR cluster named by ``--cluster-id``.

    :param cl_args: list of command-line args (``None`` means
                    ``sys.argv[1:]``)
    """
    # parse command-line args
    parser = _make_arg_parser()
    opts = parser.parse_args(cl_args)

    MRJob.set_up_logging(quiet=opts.quiet, verbose=opts.verbose)

    # a persistent runner gives us a properly configured EMR client
    runner = EMRJobRunner(**_runner_kwargs(opts))

    cluster_id = opts.cluster_id
    log.debug('Terminating cluster %s' % cluster_id)

    emr_client = runner.make_emr_client()
    emr_client.terminate_job_flows(JobFlowIds=[cluster_id])

    log.info('Terminated cluster %s' % cluster_id)
def main(cl_args=None):
    """Terminate the EMR cluster whose ID is the sole positional argument.

    :param cl_args: list of command-line args (``None`` means
                    ``sys.argv[1:]``)
    """
    # parse command-line args
    opt_parser = _make_option_parser()
    options, args = opt_parser.parse_args(cl_args)

    # require exactly one positional argument: the cluster ID
    if len(args) != 1:
        opt_parser.error('This tool takes exactly one argument.')
    cluster_id = args[0]

    MRJob.set_up_logging(quiet=options.quiet, verbose=options.verbose)

    # a persistent runner gives us a properly configured EMR client
    runner = EMRJobRunner(**_runner_kwargs(options))

    log.debug('Terminating cluster %s' % cluster_id)
    emr_client = runner.make_emr_client()
    emr_client.terminate_job_flows(JobFlowIds=[cluster_id])
    log.info('Terminated cluster %s' % cluster_id)
def main(cl_args=None):
    """Diagnose a failed EMR step, logging its probable cause of failure.

    :param cl_args: list of command-line args (``None`` means
                    ``sys.argv[1:]``)
    """
    opts = _make_arg_parser().parse_args(cl_args)

    MRJob.set_up_logging(quiet=opts.quiet, verbose=opts.verbose)

    # everything except our tool-specific switches goes to the runner
    kwargs = {key: value for key, value in opts.__dict__.items()
              if key not in ('quiet', 'verbose', 'step_id')}

    runner = EMRJobRunner(**kwargs)
    emr_client = runner.make_emr_client()

    # locate the step to diagnose; bail out if it can't be found
    step = _get_step(emr_client, opts.cluster_id, opts.step_id)
    if not step:
        raise SystemExit(1)

    state = step['Status']['State']
    if state != 'FAILED':
        log.warning('step %s has state %s, not FAILED' %
                    (step['Id'], state))

    # dig through the step's logs for an error
    log.info('Diagnosing step %s (%s)' % (step['Id'], step['Name']))

    interpretation = dict(step_id=step['Id'])
    error = runner._pick_error(interpretation, _infer_step_type(step))

    # report what we found, if anything
    if error:
        log.error('Probable cause of failure:\n\n%s\n\n' %
                  _format_error(error))
    else:
        log.warning('No error detected')
def main(cl_args=None):
    """Entry point: find a failed EMR step and log its probable cause.

    :param cl_args: list of command-line args (``None`` means
                    ``sys.argv[1:]``)
    """
    arg_parser = _make_arg_parser()
    options = arg_parser.parse_args(cl_args)

    MRJob.set_up_logging(quiet=options.quiet, verbose=options.verbose)

    # strip tool-only options before handing the rest to the runner
    excluded = ('quiet', 'verbose', 'step_id')
    runner_kwargs = {k: v for k, v in options.__dict__.items()
                     if k not in excluded}

    runner = EMRJobRunner(**runner_kwargs)
    emr_client = runner.make_emr_client()

    # pick the step to examine; exit nonzero if there isn't one
    step = _get_step(emr_client, options.cluster_id, options.step_id)
    if not step:
        raise SystemExit(1)

    if step['Status']['State'] != 'FAILED':
        log.warning('step %s has state %s, not FAILED' %
                    (step['Id'], step['Status']['State']))

    # interpret the step's logs
    log.info('Diagnosing step %s (%s)' % (step['Id'], step['Name']))

    log_interpretation = dict(step_id=step['Id'])
    step_type = _infer_step_type(step)
    error = runner._pick_error(log_interpretation, step_type)

    # print the error (or note its absence)
    if error:
        log.error('Probable cause of failure:\n\n%s\n\n' %
                  _format_error(error))
    else:
        log.warning('No error detected')
def _maybe_terminate_clusters(dry_run=False, max_mins_idle=None, now=None,
                              pool_name=None, pooled_only=False,
                              unpooled_only=False, max_mins_locked=None,
                              quiet=False, **kwargs):
    """Scan EMR clusters and terminate the idle ones.

    Tallies clusters by state, logs a summary, and calls
    ``_terminate_and_notify()`` on each idle, unprotected cluster that
    passes the idle-time and pooling filters. Extra keyword args are
    passed through to :py:class:`EMRJobRunner`.
    """
    if now is None:
        now = _boto3_now()

    # old default behavior
    if max_mins_idle is None:
        max_mins_idle = _DEFAULT_MAX_MINS_IDLE

    runner = EMRJobRunner(**kwargs)
    emr_client = runner.make_emr_client()

    num_starting = 0
    num_bootstrapping = 0
    num_done = 0
    num_idle = 0
    num_pending = 0
    num_running = 0

    # include RUNNING to catch clusters with PENDING jobs that
    # never ran (see #365).
    cluster_pages = _boto3_paginate(
        'Clusters', emr_client, 'list_clusters',
        ClusterStates=['WAITING', 'RUNNING'])

    for summary in cluster_pages:
        cluster_id = summary['Id']

        # tally clusters that can't be candidates for termination
        if _is_cluster_done(summary):
            num_done += 1
            continue
        elif _is_cluster_starting(summary):
            num_starting += 1
            continue
        elif _is_cluster_bootstrapping(summary):
            num_bootstrapping += 1
            continue

        # steps (most recent first) tell us what the cluster is doing
        steps = list(reversed(list(_boto3_paginate(
            'Steps', emr_client, 'list_steps', ClusterId=cluster_id))))

        if any(_is_step_running(s) for s in steps):
            num_running += 1
            continue

        # cluster is idle (or has only pending steps)
        time_idle = now - _time_last_active(summary, steps)
        is_pending = _cluster_has_pending_steps(steps)

        # the cluster summary doesn't include tags, so fetch the full
        # description to determine pool membership
        cluster = emr_client.describe_cluster(
            ClusterId=cluster_id)['Cluster']
        _, pool = _pool_hash_and_name(cluster)

        if is_pending:
            num_pending += 1
        else:
            num_idle += 1

        log.debug('cluster %s %s for %s, %s (%s) - %s' % (
            cluster_id,
            'pending' if is_pending else 'idle',
            strip_microseconds(time_idle),
            ('unpooled' if pool is None else 'in %s pool' % pool),
            summary['Name'],
            'protected' if cluster['TerminationProtected']
            else 'unprotected',
        ))

        # apply the filters; matching any one of them spares the cluster
        if (max_mins_idle is not None and
                time_idle <= timedelta(minutes=max_mins_idle)):
            continue
        if pooled_only and pool is None:
            continue
        if unpooled_only and pool is not None:
            continue
        if pool_name is not None and pool != pool_name:
            continue
        if cluster['TerminationProtected']:
            continue

        # survived every filter: terminate this idle cluster
        _terminate_and_notify(
            runner=runner,
            cluster_id=cluster_id,
            cluster_name=summary['Name'],
            num_steps=len(steps),
            is_pending=is_pending,
            time_idle=time_idle,
            dry_run=dry_run,
            max_mins_locked=max_mins_locked,
            quiet=quiet)

    log.info('Cluster statuses: %d starting, %d bootstrapping, %d running,'
             ' %d pending, %d idle, %d done' % (
                 num_starting, num_bootstrapping, num_running,
                 num_pending, num_idle, num_done))
def _maybe_terminate_clusters(dry_run=False, max_hours_idle=None,
                              mins_to_end_of_hour=None, now=None,
                              pool_name=None, pooled_only=False,
                              unpooled_only=False, max_mins_locked=None,
                              quiet=False, **kwargs):
    """Scan EMR clusters and terminate idle ones, honoring the
    end-of-billing-hour window.

    Tallies clusters by state, logs a summary, and calls
    ``_terminate_and_notify()`` on each idle cluster that passes the
    idle-time, end-of-hour, and pooling filters. Extra keyword args go
    to :py:class:`EMRJobRunner`.
    """
    if now is None:
        now = _boto3_now()

    # old default behavior
    if max_hours_idle is None and mins_to_end_of_hour is None:
        max_hours_idle = _DEFAULT_MAX_HOURS_IDLE

    runner = EMRJobRunner(**kwargs)
    emr_client = runner.make_emr_client()

    num_starting = 0
    num_bootstrapping = 0
    num_done = 0
    num_idle = 0
    num_pending = 0
    num_running = 0

    # We don't filter by cluster state because we want this to work even
    # if Amazon adds another kind of idle state.
    for summary in _boto3_paginate('Clusters', emr_client, 'list_clusters'):
        cluster_id = summary['Id']

        # tally clusters that can't be candidates for termination
        if _is_cluster_done(summary):
            num_done += 1
            continue
        if _is_cluster_starting(summary):
            num_starting += 1
            continue
        if _is_cluster_bootstrapping(summary):
            num_bootstrapping += 1
            continue

        # steps (most recent first) tell us whether anything is running
        steps = list(reversed(list(_boto3_paginate(
            'Steps', emr_client, 'list_steps', ClusterId=cluster_id))))

        if any(_is_step_running(s) for s in steps):
            num_running += 1
            continue

        # cluster is idle; measure idleness and time left in the
        # current billing hour
        time_idle = now - _time_last_active(summary, steps)
        time_to_end_of_hour = _est_time_to_hour(summary, now=now)
        is_pending = _cluster_has_pending_steps(steps)

        # pool membership is encoded in the bootstrap actions
        bootstrap_actions = list(_boto3_paginate(
            'BootstrapActions', emr_client, 'list_bootstrap_actions',
            ClusterId=cluster_id))
        _, pool = _pool_hash_and_name(bootstrap_actions)

        if is_pending:
            num_pending += 1
        else:
            num_idle += 1

        log.debug(
            'cluster %s %s for %s, %s to end of hour, %s (%s)' %
            (cluster_id,
             'pending' if is_pending else 'idle',
             strip_microseconds(time_idle),
             strip_microseconds(time_to_end_of_hour),
             ('unpooled' if pool is None else 'in %s pool' % pool),
             summary['Name']))

        # filter out clusters that don't meet our criteria
        if (max_hours_idle is not None and
                time_idle <= timedelta(hours=max_hours_idle)):
            continue

        # mins_to_end_of_hour doesn't apply to jobs with pending steps
        if (mins_to_end_of_hour is not None and
                (is_pending or
                 time_to_end_of_hour >= timedelta(
                     minutes=mins_to_end_of_hour))):
            continue

        if pooled_only and pool is None:
            continue
        if unpooled_only and pool is not None:
            continue
        if pool_name is not None and pool != pool_name:
            continue

        # survived every filter: terminate this idle cluster
        _terminate_and_notify(
            runner=runner,
            cluster_id=cluster_id,
            cluster_name=summary['Name'],
            num_steps=len(steps),
            is_pending=is_pending,
            time_idle=time_idle,
            time_to_end_of_hour=time_to_end_of_hour,
            dry_run=dry_run,
            max_mins_locked=max_mins_locked,
            quiet=quiet)

    log.info('Cluster statuses: %d starting, %d bootstrapping, %d running,'
             ' %d pending, %d idle, %d done' % (
                 num_starting, num_bootstrapping, num_running,
                 num_pending, num_idle, num_done))
def _maybe_terminate_clusters(dry_run=False, max_mins_idle=None, now=None,
                              pool_name=None, pooled_only=False,
                              unpooled_only=False, max_mins_locked=None,
                              quiet=False, **kwargs):
    """Find idle EMR clusters and terminate those matching our filters.

    Tallies clusters by state, logs a summary, and calls
    ``_terminate_and_notify()`` on each idle cluster that passes the
    idle-time and pooling filters. Extra keyword args are passed
    through to :py:class:`EMRJobRunner`.
    """
    if now is None:
        now = _boto3_now()

    # old default behavior
    if max_mins_idle is None:
        max_mins_idle = _DEFAULT_MAX_MINS_IDLE

    runner = EMRJobRunner(**kwargs)
    emr_client = runner.make_emr_client()

    num_starting = 0
    num_bootstrapping = 0
    num_done = 0
    num_idle = 0
    num_pending = 0
    num_running = 0

    # We don't filter by cluster state because we want this to work even
    # if Amazon adds another kind of idle state.
    all_clusters = _boto3_paginate('Clusters', emr_client, 'list_clusters')

    for summary in all_clusters:
        cluster_id = summary['Id']

        # tally clusters that can't be candidates for termination
        if _is_cluster_done(summary):
            num_done += 1
            continue
        if _is_cluster_starting(summary):
            num_starting += 1
            continue
        if _is_cluster_bootstrapping(summary):
            num_bootstrapping += 1
            continue

        # steps (most recent first) reveal what the cluster is doing
        steps = list(reversed(list(_boto3_paginate(
            'Steps', emr_client, 'list_steps', ClusterId=cluster_id))))

        if any(_is_step_running(s) for s in steps):
            num_running += 1
            continue

        # cluster is idle (or has only pending steps)
        time_idle = now - _time_last_active(summary, steps)
        is_pending = _cluster_has_pending_steps(steps)

        # the summary lacks tags, so fetch the full cluster description
        # to determine pool membership
        cluster = emr_client.describe_cluster(
            ClusterId=cluster_id)['Cluster']
        _, pool = _pool_hash_and_name(cluster)

        if is_pending:
            num_pending += 1
        else:
            num_idle += 1

        log.debug(
            'cluster %s %s for %s, %s (%s)' %
            (cluster_id,
             'pending' if is_pending else 'idle',
             strip_microseconds(time_idle),
             ('unpooled' if pool is None else 'in %s pool' % pool),
             summary['Name']))

        # apply the filters; matching any one of them spares the cluster
        if (max_mins_idle is not None and
                time_idle <= timedelta(minutes=max_mins_idle)):
            continue
        if pooled_only and pool is None:
            continue
        if unpooled_only and pool is not None:
            continue
        if pool_name is not None and pool != pool_name:
            continue

        # survived every filter: terminate this idle cluster
        _terminate_and_notify(
            runner=runner,
            cluster_id=cluster_id,
            cluster_name=summary['Name'],
            num_steps=len(steps),
            is_pending=is_pending,
            time_idle=time_idle,
            dry_run=dry_run,
            max_mins_locked=max_mins_locked,
            quiet=quiet)

    log.info(
        'Cluster statuses: %d starting, %d bootstrapping, %d running,'
        ' %d pending, %d idle, %d done' % (
            num_starting, num_bootstrapping, num_running,
            num_pending, num_idle, num_done))