def get_options(self):
  """Return the command-line options accepted by this verb."""
  return [
      SSH_INSTANCE_ARGUMENT,
      SSH_USER_OPTION,
      SSH_OPTIONS,
      EXECUTOR_SANDBOX_OPTION,
      CommandOption(
          '--tunnels', '-L',
          dest='tunnels',
          action='append',
          metavar='PORT:NAME',
          default=[],
          # Fixed typo: "PART" -> "PORT" so the help matches the PORT:NAME metavar.
          help="Add tunnel from local port PORT to remote named port NAME"),
      CommandOption(
          '--command', '-c',
          dest='command',
          type=str,
          default=None,
          metavar="unix_command_line",
          help="Command to execute through the ssh connection."),
      CommandOption(
          '--pid-file', '-p',
          dest='pid_file',
          type=str,
          default=None,
          metavar="pid_file",
          help="File in which to store the PID of the resulting ssh call")
  ]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  local_option = CommandOption(
      "--local",
      dest="local",
      default=False,
      action="store_true",
      help='Inspect the configuration as would be created by the "job create" command.')
  raw_option = CommandOption(
      "--raw",
      dest="raw",
      default=False,
      action="store_true",
      help="Show the raw configuration.")
  return [BIND_OPTION, JSON_READ_OPTION, local_option, raw_option,
          JOBSPEC_ARGUMENT, CONFIG_ARGUMENT]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  per_instance_failures = CommandOption(
      '--max-per-instance-failures',
      type=int,
      default=0,
      help='Maximum number of restarts per instance during restart. Increments total '
           'failure count when this limit is exceeded.')
  restart_threshold = CommandOption(
      '--restart-threshold',
      type=int,
      default=60,
      help='Maximum number of seconds before a shard must move into the RUNNING state '
           'before considered a failure.')
  # --rollback-on-failure uses store_false: supplying the flag disables rollback.
  rollback_on_failure = CommandOption(
      "--rollback-on-failure",
      default=True,
      action="store_false",
      help="If false, prevent update from performing a rollback.")
  return [
      BATCH_OPTION, BIND_OPTION, BROWSER_OPTION, FORCE_OPTION, HEALTHCHECK_OPTION,
      JSON_READ_OPTION, WATCH_OPTION, per_instance_failures, restart_threshold,
      CONFIG_OPTION, MAX_TOTAL_FAILURES_OPTION, STRICT_OPTION, rollback_on_failure,
      INSTANCES_SPEC_ARGUMENT
  ]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  verbose = CommandOption(
      "--verbose-logging", "-v",
      default=False,
      action="store_true",
      help="Show verbose logging, including all logs up to level INFO (equivalent to "
           "--logging-level=20)")
  level = CommandOption(
      "--logging-level",
      default=None,
      type=int,
      help="Set logging to a specific numeric level.")
  return [verbose, level]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  threads = CommandOption(
      '--threads', '-t',
      type=int,
      default=1,
      dest='num_threads',
      help='Number of threads to use')
  command_argument = CommandOption('cmd', type=str, metavar="unix_command_line")
  return [threads, SSH_USER_OPTION, SSH_OPTIONS, EXECUTOR_SANDBOX_OPTION,
          INSTANCES_SPEC_ARGUMENT, command_argument]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  user = CommandOption(
      "--user",
      default=None,
      metavar="username",
      help="The name of the user who initiated the update")
  status = CommandOption(
      "--status",
      choices=JobUpdateStatus._NAMES_TO_VALUES,
      default=None,
      action="append",
      help="Set of update statuses to search for")
  cluster = CommandOption(
      "cluster",
      metavar="clustername",
      help="Cluster to search for matching updates")
  return [JOBSPEC_OPTION, ROLE_OPTION, user, status, JSON_WRITE_OPTION, cluster]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  return [
      CommandOption(
          "--verbose", "-v",
          default=False,
          action="store_true",
          help="Show verbose output")
  ]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  wait_until = CommandOption(
      "--wait-until",
      choices=self.CREATE_STATES,
      default="PENDING",
      help="Block the client until all the tasks have transitioned into the requested "
           "state. Default: PENDING")
  return [BIND_OPTION, JSON_READ_OPTION, wait_until, BROWSER_OPTION,
          JOBSPEC_ARGUMENT, CONFIG_ARGUMENT]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  instance_count = CommandOption(
      'instance_count',
      type=int,
      help='Number of instances to add.')
  return [BROWSER_OPTION, BIND_OPTION, ADD_INSTANCE_WAIT_OPTION, CONFIG_OPTION,
          JSON_READ_OPTION, TASK_INSTANCE_ARGUMENT, instance_count]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  config = CommandOption(
      "--config",
      type=str,
      default=None,
      dest="config",
      metavar="pathname",
      help="Config file for the job, possibly containing hooks")
  return [BROWSER_OPTION, config, BATCH_OPTION, MAX_TOTAL_FAILURES_OPTION,
          NO_BATCHING_OPTION]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  scope_filter = CommandOption(
      'filter',
      type=self.update_filter,
      metavar="CLUSTER[/ROLE[/ENV[/JOB]]]",
      help='A path-like specifier for the scope of updates to list.')
  status = CommandOption(
      "--status",
      choices=self.STATUS_GROUPS,
      default=[],
      action="append",
      help="""Update state to filter by. This may be specified multiple times, in which case updates matching any of the specified statuses will be included.""")
  user = CommandOption(
      "--user",
      default=None,
      metavar="username",
      help="The name of the user who initiated the update")
  return [scope_filter, status, user, JSON_WRITE_OPTION]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  diff_against = CommandOption(
      "--from",
      dest="rename_from",
      type=AuroraJobKey.from_path,
      default=None,
      metavar="cluster/role/env/name",
      help="If specified, the job key to diff against.")
  return [BIND_OPTION, JSON_READ_OPTION, diff_against, INSTANCES_SPEC_ARGUMENT,
          CONFIG_ARGUMENT]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  return [
      CommandOption(
          "--reveal-errors",
          default=False,
          action="store_true",
          help="If enabled, allow unknown errors to generate stack dumps")
  ]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  percentiles = CommandOption(
      '--percentiles',
      type=parse_percentiles,
      default=None,
      help="""Percentiles to report uptime for. Format: values within (0.0, 100.0). Example: --percentiles=50,75,95.5""")
  return [percentiles, JOBSPEC_ARGUMENT]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  raw_option = CommandOption(
      "--raw",
      dest="raw",
      default=False,
      action="store_true",
      help="Show the raw configuration.")
  return [BIND_OPTION, JSON_READ_OPTION, raw_option, JOBSPEC_ARGUMENT, CONFIG_ARGUMENT]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  message = CommandOption(
      '--message', '-m',
      type=str,
      default=None,
      help='Message to include with the kill state transition')
  return [BROWSER_OPTION, BIND_OPTION, JSON_READ_OPTION, CONFIG_OPTION, BATCH_OPTION,
          MAX_TOTAL_FAILURES_OPTION, NO_BATCHING_OPTION, message]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  diff_against = CommandOption(
      "--from",
      dest="rename_from",
      type=AuroraJobKey.from_path,
      default=None,
      help="If specified, the job key to diff against.")
  return [BIND_OPTION, JSON_READ_OPTION, diff_against, JOBSPEC_ARGUMENT,
          CONFIG_ARGUMENT]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  return [
      CommandOption(
          "key",
          type=str,
          metavar="cluster[/role[/env[/job]]]",
          help="A key for the cluster, role, env, or job whose scheduler page should be opened.")
  ]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  per_instance_failures = CommandOption(
      "--max-per-instance-failures",
      type=int,
      default=0,
      help="Maximum number of restarts per instance during restart. Increments total "
           "failure count when this limit is exceeded.")
  restart_threshold = CommandOption(
      "--restart-threshold",
      type=int,
      default=60,
      help="Maximum number of seconds before an instance must move into the RUNNING state "
           "before considered a failure.")
  return [BATCH_OPTION, BIND_OPTION, BROWSER_OPTION, CONFIG_OPTION, FORCE_OPTION,
          HEALTHCHECK_OPTION, INSTANCES_SPEC_ARGUMENT, JSON_READ_OPTION,
          MAX_TOTAL_FAILURES_OPTION, STRICT_OPTION, WATCH_OPTION,
          per_instance_failures, restart_threshold]
def get_options(cls):
  """Returns the options that should be added to option parsers for the hooks registry."""
  skip_hooks = CommandOption(
      "--skip-hooks",
      default=None,
      metavar="hook,hook,...",
      help="A comma-separated list of command hook names that should be skipped. If the hooks"
           " cannot be skipped, then the command will be aborted")
  return [skip_hooks]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  config = CommandOption(
      "--config",
      type=str,
      default=None,
      dest="config_file",
      help="Config file for the job, possibly containing hooks")
  return [JSON_READ_OPTION, config, JOBSPEC_ARGUMENT]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  durations = CommandOption(
      '--durations',
      type=parse_time_values,
      default=None,
      # Fixed stray apostrophe that followed "--durations=1d" in the help text.
      help="""Durations to report uptime for. Format: XdYhZmWs (each field optional but must be in that order.) Examples: --durations=1d --durations=3m,10s,1h3m10s""")
  return [durations, JOBSPEC_ARGUMENT]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  return [
      SSH_USER_OPTION,
      EXECUTOR_SANDBOX_OPTION,
      CommandOption(
          '--tunnels', '-L',
          dest='tunnels',
          action='append',
          metavar='PORT:NAME',
          default=[],
          # Fixed typo: "PART" -> "PORT" so the help matches the PORT:NAME metavar.
          help="Add tunnel from local port PORT to remote named port NAME"),
      CommandOption(
          '--command', '-c',
          dest='command',
          type=str,
          default=None,
          help="Command to execute through the ssh connection."),
      TASK_INSTANCE_ARGUMENT
  ]
def get_options(self):
  """Return the command-line options accepted by this verb."""
  per_instance_failures = CommandOption(
      "--max-per-instance-failures",
      type=int,
      default=0,
      help="Maximum number of restarts per instance during restart. Increments total "
           "failure count when this limit is exceeded.")
  # Retained only for CLI backward compatibility; the value is ignored.
  restart_threshold = CommandOption(
      "--restart-threshold",
      type=int,
      default=0,
      help="This setting is DEPRECATED, will not have any effect if provided and will be "
           "removed in the next release.")
  return [BATCH_OPTION, BIND_OPTION, BROWSER_OPTION, CONFIG_OPTION, FORCE_OPTION,
          HEALTHCHECK_OPTION, INSTANCES_SPEC_ARGUMENT, JSON_READ_OPTION,
          MAX_TOTAL_FAILURES_OPTION, STRICT_OPTION, WATCH_OPTION,
          per_instance_failures, restart_threshold]
def arg_type_jobkey(key): """Given a partial jobkey, where parts can be wildcards, parse it. Slots that are wildcards will be replaced by "*". """ parts = [] for part in key.split('/'): parts.append(part) if len(parts) > 4: raise ValueError('Job key must have no more than 4 segments') while len(parts) < 4: parts.append('*') return PartialJobKey(*parts) WILDCARD_JOBKEY_OPTION = CommandOption("jobspec", type=arg_type_jobkey, metavar="cluster[/role[/env[/name]]]", help="A jobkey, optionally containing wildcards") class CreateJobCommand(Verb): @property def name(self): return "create" @property def help(self): return "Create a service or ad hoc job using aurora" CREATE_STATES = ("PENDING", "RUNNING", "FINISHED") def get_options(self):
def get_options(self):
  """Return the command-line options accepted by this verb."""
  bogosity = CommandOption('--bogosity', type=str, help='Permitted bogosity level')
  return [bogosity]
class UpdateInfo(Verb):
  """Verb that displays detailed status for a single scheduler-driven job update."""

  # Optional positional argument: the scheduler-assigned update ID.
  # nargs='?' makes it optional; when omitted, the job's active update is used.
  UPDATE_ID_ARGUMENT = CommandOption(
      'id',
      type=str,
      nargs='?',
      metavar='ID',
      help='Update identifier provided by the scheduler when an update was started.')

  @property
  def name(self):
    return 'info'

  def get_options(self):
    return [JSON_WRITE_OPTION, JOBSPEC_ARGUMENT, self.UPDATE_ID_ARGUMENT]

  @property
  def help(self):
    return """Display detailed status information about a scheduler-driven in-progress update. If no update ID is provided, information will be displayed about the active update for the job."""

  def execute(self, context):
    """Fetch and print update details, as JSON or human-readable text.

    Returns EXIT_INVALID_PARAMETER when no update ID was given and the job
    has no active update; EXIT_OK otherwise.
    """
    if context.options.id:
      # An explicit update ID was supplied: build the key directly.
      key = JobUpdateKey(job=context.options.jobspec.to_thrift(), id=context.options.id)
    else:
      # No ID given: look up the job's currently-active update.
      key = UpdateController(
          context.get_api(context.options.jobspec.cluster),
          context).get_update_key(context.options.jobspec)
      if key is None:
        context.print_err("There is no active update for this job.")
        return EXIT_INVALID_PARAMETER
    api = context.get_api(context.options.jobspec.cluster)
    response = api.get_job_update_details(key)
    # Logs the response and raises (via the context) on a scheduler error.
    context.log_response_and_raise(response)
    details = response.result.getJobUpdateDetailsResult.details
    if context.options.write_json:
      # Machine-readable output: summary fields plus the two event lists.
      result = {
          "updateId": ("%s" % details.update.summary.key.id),
          "job": str(context.options.jobspec),
          "started": details.update.summary.state.createdTimestampMs,
          "last_updated": details.update.summary.state.lastModifiedTimestampMs,
          "status": JobUpdateStatus._VALUES_TO_NAMES[details.update.summary.state.status],
          "update_events": [],
          "instance_update_events": []
      }
      update_events = details.updateEvents
      if update_events is not None and len(update_events) > 0:
        for event in update_events:
          event_data = {
              "status": JobUpdateStatus._VALUES_TO_NAMES[event.status],
              "timestampMs": event.timestampMs
          }
          # The message field is optional; include it only when present.
          if event.message:
            event_data["message"] = event.message
          result["update_events"].append(event_data)
      instance_events = details.instanceEvents
      if instance_events is not None and len(instance_events) > 0:
        for event in instance_events:
          result["instance_update_events"].append({
              "instance": event.instanceId,
              "timestamp": event.timestampMs,
              "action": JobUpdateAction._VALUES_TO_NAMES[event.action]
          })
      context.print_out(
          json.dumps(result, indent=2, separators=[',', ': '], sort_keys=False))
    else:
      # Human-readable output, mirroring the JSON structure above.
      context.print_out(
          "Job: %s, UpdateID: %s" % (context.options.jobspec,
          details.update.summary.key.id))
      context.print_out(
          "Started %s, last activity: %s" % (
          format_timestamp(details.update.summary.state.createdTimestampMs),
          format_timestamp(details.update.summary.state.lastModifiedTimestampMs)))
      context.print_out("Current status: %s" % JobUpdateStatus._VALUES_TO_NAMES[
          details.update.summary.state.status])
      update_events = details.updateEvents
      if update_events is not None and len(update_events) > 0:
        context.print_out("Update events:")
        for event in update_events:
          context.print_out(
              "Status: %s at %s" % (JobUpdateStatus._VALUES_TO_NAMES[event.status],
              format_timestamp(event.timestampMs)), indent=2)
          if event.message:
            context.print_out("  message: %s" % event.message, indent=4)
      instance_events = details.instanceEvents
      if instance_events is not None and len(instance_events) > 0:
        context.print_out("Instance events:")
        for event in instance_events:
          context.print_out(
              "Instance %s at %s: %s" % (event.instanceId,
              format_timestamp(event.timestampMs),
              JobUpdateAction._VALUES_TO_NAMES[event.action]), indent=2)
    # NOTE(review): the flattened source makes this return's indent ambiguous;
    # placed at method level so both output branches return EXIT_OK — confirm
    # against the original file.
    return EXIT_OK
class StartUpdate(Verb):
  """Verb that starts a scheduler-driven rolling update of a running job."""

  # Printed once the scheduler accepts the update; %s is the update page URL.
  UPDATE_MSG_TEMPLATE = "Job update has started. View your update progress at %s"

  WAIT_OPTION = CommandOption(
      '--wait',
      default=False,
      action='store_true',
      help='Wait until the update completes')

  def __init__(self, clock=time):
    # Injectable clock, used when waiting for the update to complete.
    self._clock = clock

  @property
  def name(self):
    return 'start'

  def get_options(self):
    return [
        BIND_OPTION,
        BROWSER_OPTION,
        HEALTHCHECK_OPTION,
        JSON_READ_OPTION,
        MESSAGE_OPTION,
        STRICT_OPTION,
        INSTANCES_SPEC_ARGUMENT,
        CONFIG_ARGUMENT,
        self.WAIT_OPTION
    ]

  @property
  def help(self):
    # NOTE(review): line breaks inside this dedented help text were lost when
    # the source was flattened; breaks below are reconstructed at sentence
    # boundaries — confirm against the original file.
    return textwrap.dedent("""\
        Start a rolling update of a running job, using the update configuration
        within the config file as a control for update velocity and failure tolerance.

        The updater only takes action on instances in a job that have changed, meaning
        that changing a single instance will only induce a restart on the changed task instance.

        You may want to consider using the 'aurora job diff' subcommand before updating,
        to preview what changes will take effect.
        """)

  def execute(self, context):
    """Submit the update to the scheduler and optionally wait for completion.

    Raises a CommandError for cron jobs (which must be updated via
    "aurora cron schedule") or for invalid update configurations.
    Returns EXIT_OK, or the result of wait_for_update when --wait was given.
    """
    job = context.options.instance_spec.jobkey
    # ALL_INSTANCES is passed to the API as None.
    instances = (None if context.options.instance_spec.instance == ALL_INSTANCES else
        context.options.instance_spec.instance)
    config = context.get_job_config(job, context.options.config_file)
    if config.raw().has_cron_schedule():
      raise context.CommandError(
          EXIT_COMMAND_FAILURE,
          "Cron jobs may only be updated with \"aurora cron schedule\" command")
    api = context.get_api(config.cluster())
    try:
      resp = api.start_job_update(config, context.options.message, instances)
    except AuroraClientAPI.UpdateConfigError as e:
      raise context.CommandError(EXIT_INVALID_CONFIGURATION, e.message)
    context.log_response_and_raise(resp, err_code=EXIT_API_ERROR,
        err_msg="Failed to start update due to error:")
    if resp.result:
      update_key = resp.result.startJobUpdateResult.key
      url = get_update_page(
          api,
          AuroraJobKey.from_thrift(config.cluster(), update_key.job),
          resp.result.startJobUpdateResult.key.id)
      context.print_out(self.UPDATE_MSG_TEMPLATE % url)
      if context.options.wait:
        return wait_for_update(context, self._clock, api, update_key)
    else:
      # No result payload: print whatever messages the scheduler returned
      # (presumably a no-op update — confirm against API semantics).
      context.print_out(combine_messages(resp))
    return EXIT_OK
def abort(self, job_key, message): return self._modify_update( job_key, lambda key: self.api.abort_job_update(key, message), "Failed to abort update due to error:", "Update has been aborted.") def format_timestamp(stamp_millis): return datetime.datetime.utcfromtimestamp(stamp_millis / 1000).isoformat() MESSAGE_OPTION = CommandOption( '--message', '-m', type=str, default=None, help='Message to include with the update state transition') class StartUpdate(Verb): UPDATE_MSG_TEMPLATE = "Job update has started. View your update progress at %s" WAIT_OPTION = CommandOption( '--wait', default=False, action='store_true', help='Wait until the update completes') def __init__(self, clock=time):
"Failed to resume update due to error:", "Update has been resumed.") def abort(self, job_key, message): return self._modify_update( job_key, lambda key: self.api.abort_job_update(key, message), "Failed to abort update due to error:", "Update has been aborted.") def format_timestamp(stamp_millis): return datetime.datetime.utcfromtimestamp(stamp_millis / 1000).isoformat() MESSAGE_OPTION = CommandOption( '--message', '-m', type=str, default=None, help='Message to include with the update state transition') WAIT_OPTION = lambda help_msg: CommandOption( '--wait', default=False, action='store_true', help=help_msg) class StartUpdate(Verb): UPDATE_MSG_TEMPLATE = "Job update has started. View your update progress at %s" def __init__(self, clock=time): self._clock = clock @property