Example #1
def really_start_cron(args, options):
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  resp = api.start_cronjob(job_key, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
Example #2
 def test_disambiguate_job_path_or_die_unambiguous(self):
   key = LiveJobDisambiguator._disambiguate_or_die(self._api, self.ROLE, self.ENV, self.NAME)
   cluster_name, role, env, name = key
   assert cluster_name == self.CLUSTER.name
   assert role == self.ROLE
   assert env == self.ENV
   assert name == self.NAME
Example #3
 def test_disambiguate_args_or_die_unambiguous_with_no_config(self):
     expected = (self._api,
                 AuroraJobKey(self.CLUSTER.name, self.ROLE, self.ENV,
                              self.NAME), None)
     result = LiveJobDisambiguator.disambiguate_args_or_die(
         [self.JOB_PATH], None, client_factory=lambda *_: self._api)
     assert result == expected
Example #4
def really_kill(args, options):
  if options.shards is None:
    print('Shards option is required for kill; use killall to kill all shards', file=sys.stderr)
    exit(1)
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  instance_key = str(job_key)
  if options.shards is not None:
    instance_key = "%s/%s" % (instance_key, ",".join(map(str, options.shards)))
  new_cmd = ["job", "kill", instance_key]
  if config_file is not None:
    new_cmd.append("--config=%s" % config_file)
  if options.open_browser:
    new_cmd.append("--open-browser")
  if options.batch_size is not None:
    new_cmd.append("--batch-size=%s" % options.batch_size)
  if options.max_total_failures is not None:
    new_cmd.append("--max-total-failures=%s" % options.max_total_failures)
  v1_deprecation_warning("kill", new_cmd)

  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  if options.batch_size is not None:
    kill_in_batches(api, job_key, options.shards, options.batch_size, options.max_failures_option)
  else:
    resp = api.kill_job(job_key, options.shards, config=config)
    check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
  wait_kill_tasks(api.scheduler_proxy, job_key, options.shards)
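To see what the deprecation path recommends, the following standalone sketch replays the new_cmd construction from really_kill with stand-in option values (the SimpleNamespace fields and the job path are illustrative, not taken from the source):

from types import SimpleNamespace

options = SimpleNamespace(shards=[0, 1, 3], batch_size=5,
                          max_total_failures=1, open_browser=False)
instance_key = 'cluster/role/env/job'  # stand-in for str(job_key)
if options.shards is not None:
  instance_key = "%s/%s" % (instance_key, ",".join(map(str, options.shards)))
new_cmd = ["job", "kill", instance_key]
if options.open_browser:
  new_cmd.append("--open-browser")
if options.batch_size is not None:
  new_cmd.append("--batch-size=%s" % options.batch_size)
if options.max_total_failures is not None:
  new_cmd.append("--max-total-failures=%s" % options.max_total_failures)
print(" ".join(new_cmd))
# job kill cluster/role/env/job/0,1,3 --batch-size=5 --max-total-failures=1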
Example #5
def restart(args, options):
  """usage: restart cluster/role/env/job
               [--shards=SHARDS]
               [--batch_size=INT]
               [--updater_health_check_interval_seconds=SECONDS]
               [--max_per_shard_failures=INT]
               [--max_total_failures=INT]
               [--restart_threshold=INT]
               [--watch_secs=SECONDS]

  Performs a rolling restart of shards within a job.

  Restarts are fully controlled client-side, so aborting halts the restart.
  """
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  updater_config = UpdaterConfig(
      options.batch_size,
      options.restart_threshold,
      options.watch_secs,
      options.max_per_shard_failures,
      options.max_total_failures)
  resp = api.restart(job_key, options.shards, updater_config,
      options.health_check_interval_seconds, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
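The positional arguments to UpdaterConfig line up one-to-one with the flags listed in the usage string above. A minimal sketch of that mapping, with illustrative values and an assumed import path (the real module location may differ):

from apache.aurora.client.api.updater_util import UpdaterConfig  # assumed path

updater_config = UpdaterConfig(
    3,    # --batch_size
    60,   # --restart_threshold (seconds)
    45,   # --watch_secs
    0,    # --max_per_shard_failures
    0)    # --max_total_failures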
Example #6
def status(args, options):
    """usage: status cluster/role/env/job

  Fetches and prints information about the active tasks in a job.
  """
    def is_active(task):
        return task.status in ACTIVE_STATES

    def print_task(scheduled_task):
        assigned_task = scheduled_task.assignedTask
        taskInfo = assigned_task.task
        taskString = ''
        if taskInfo:
            taskString += '''cpus: %s, ram: %s MB, disk: %s MB''' % (
                taskInfo.numCpus, taskInfo.ramMb, taskInfo.diskMb)
        if assigned_task.assignedPorts:
            taskString += '\n\tports: %s' % assigned_task.assignedPorts
        taskString += '\n\tfailure count: %s (max %s)' % (
            scheduled_task.failureCount, taskInfo.maxTaskFailures)
        taskString += '\n\tevents:'
        for event in scheduled_task.taskEvents:
            taskString += '\n\t\t %s %s: %s' % (
                datetime.fromtimestamp(event.timestamp / 1000),
                ScheduleStatus._VALUES_TO_NAMES[event.status], event.message)
        taskString += '\n\tmetadata:'
        if assigned_task.task.metadata is not None:
            for md in assigned_task.task.metadata:
                taskString += ('\n\t\t%s: %s' % (md.key, md.value))

        return taskString

    def print_tasks(tasks):
        for task in tasks:
            taskString = print_task(task)

            log.info(
                'role: %s, env: %s, name: %s, shard: %s, status: %s on %s\n%s'
                %
                (task.assignedTask.task.owner.role,
                 task.assignedTask.task.environment,
                 task.assignedTask.task.jobName, task.assignedTask.instanceId,
                 ScheduleStatus._VALUES_TO_NAMES[task.status],
                 task.assignedTask.slaveHost, taskString))

    api, job_key, _ = LiveJobDisambiguator.disambiguate_args_or_die(
        args, options, make_client_factory())
    v1_deprecation_warning("status", ["job", "status", args[0]])
    resp = api.check_status(job_key)
    check_and_log_response(resp)

    tasks = resp.result.scheduleStatusResult.tasks
    if tasks:
        # Materialize the filters so len() works under Python 3 as well.
        active_tasks = list(filter(is_active, tasks))
        log.info('Active Tasks (%s)' % len(active_tasks))
        print_tasks(active_tasks)
        inactive_tasks = list(filter(lambda x: not is_active(x), tasks))
        log.info('Inactive Tasks (%s)' % len(inactive_tasks))
        print_tasks(inactive_tasks)
    else:
        log.info('No tasks found.')
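Note the event.timestamp / 1000 above: task event timestamps are in milliseconds, while datetime.fromtimestamp expects seconds. A quick standalone illustration:

from datetime import datetime

event_timestamp_ms = 1400000000000  # illustrative value, in milliseconds
print(datetime.fromtimestamp(event_timestamp_ms / 1000))
# e.g. 2014-05-13 16:53:20 (rendered in local time)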
Example #7
def really_kill(args, options):
    if options.shards is None:
        print(
            'Shards option is required for kill; use killall to kill all shards',
            file=sys.stderr)
        exit(1)
    api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
        args, options, make_client_factory())
    instance_key = str(job_key)
    if options.shards is not None:
        instance_key = "%s/%s" % (instance_key, ",".join(
            map(str, options.shards)))
    new_cmd = ["job", "kill", instance_key]
    if config_file is not None:
        new_cmd.append("--config=%s" % config_file)
    if options.open_browser:
        new_cmd.append("--open-browser")
    if options.batch_size is not None:
        new_cmd.append("--batch-size=%s" % options.batch_size)
    if options.max_total_failures is not None:
        new_cmd.append("--max-total-failures=%s" % options.max_total_failures)
    v1_deprecation_warning("kill", new_cmd)

    config = get_job_config(job_key.to_path(), config_file,
                            options) if config_file else None
    if options.batch_size is not None:
        kill_in_batches(api, job_key, options.shards, options.batch_size,
                        options.max_failures_option)
    else:
        resp = api.kill_job(job_key, options.shards, config=config)
        check_and_log_response(resp)
    handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role,
                job_key.env, job_key.name)
    wait_kill_tasks(api.scheduler_proxy, job_key, options.shards)
Example #8
def status(args, options):
  """usage: status cluster/role/env/job

  Fetches and prints information about the active tasks in a job.
  """
  def is_active(task):
    return task.status in ACTIVE_STATES

  def print_task(scheduled_task):
    assigned_task = scheduled_task.assignedTask
    taskInfo = assigned_task.task
    taskString = ''
    if taskInfo:
      taskString += '''cpus: %s, ram: %s MB, disk: %s MB''' % (taskInfo.numCpus,
                                                               taskInfo.ramMb,
                                                               taskInfo.diskMb)
    if assigned_task.assignedPorts:
      taskString += '\n\tports: %s' % assigned_task.assignedPorts
    taskString += '\n\tfailure count: %s (max %s)' % (scheduled_task.failureCount,
                                                      taskInfo.maxTaskFailures)
    taskString += '\n\tevents:'
    for event in scheduled_task.taskEvents:
      taskString += '\n\t\t %s %s: %s' % (datetime.fromtimestamp(event.timestamp / 1000),
                                          ScheduleStatus._VALUES_TO_NAMES[event.status],
                                          event.message)
    taskString += '\n\tmetadata:'
    if assigned_task.task.metadata is not None:
      for md in assigned_task.task.metadata:
        taskString += ('\n\t\t%s: %s' % (md.key, md.value))

    return taskString

  def print_tasks(tasks):
    for task in tasks:
      taskString = print_task(task)

      log.info('role: %s, env: %s, name: %s, shard: %s, status: %s on %s\n%s' %
             (task.assignedTask.task.owner.role,
              task.assignedTask.task.environment,
              task.assignedTask.task.jobName,
              task.assignedTask.instanceId,
              ScheduleStatus._VALUES_TO_NAMES[task.status],
              task.assignedTask.slaveHost,
              taskString))

  api, job_key, _ = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  resp = api.check_status(job_key)
  check_and_log_response(resp)

  tasks = resp.result.scheduleStatusResult.tasks
  if tasks:
    # Materialize the filters so len() works under Python 3 as well.
    active_tasks = list(filter(is_active, tasks))
    log.info('Active Tasks (%s)' % len(active_tasks))
    print_tasks(active_tasks)
    inactive_tasks = list(filter(lambda x: not is_active(x), tasks))
    log.info('Inactive Tasks (%s)' % len(inactive_tasks))
    print_tasks(inactive_tasks)
  else:
    log.info('No tasks found.')
Example #9
def really_cancel_update(args, options):
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  new_cmd = ["job", "cancel-update", str(job_key)]
  v1_deprecation_warning("cancel_update", new_cmd)
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  resp = api.cancel_update(job_key, config=config)
  check_and_log_response(resp)
Example #10
 def test_disambiguate_job_path_or_die_unambiguous(self):
     key = LiveJobDisambiguator._disambiguate_or_die(
         self._api, self.ROLE, self.ENV, self.NAME)
     cluster_name, role, env, name = key
     assert cluster_name == self.CLUSTER.name
     assert role == self.ROLE
     assert env == self.ENV
     assert name == self.NAME
Example #11
def really_cancel_update(args, options):
    api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
        args, options, make_client_factory())
    new_cmd = ["job", "cancel-update", str(job_key)]
    v1_deprecation_warning("cancel_update", new_cmd)
    config = get_job_config(job_key.to_path(), config_file,
                            options) if config_file else None
    resp = api.cancel_update(job_key, config=config)
    check_and_log_response(resp)
Example #12
def really_start_cron(args, options):
    api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
        args, options, make_client_factory())
    config = get_job_config(job_key.to_path(), config_file,
                            options) if config_file else None
    resp = api.start_cronjob(job_key, config=config)
    check_and_log_response(resp)
    handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role,
                job_key.env, job_key.name)
Example #13
def cancel_update(args, options):
  """usage: cancel_update cluster/role/env/job

  Unlocks a job for updates.
  A job may be locked if a client's update session terminated abnormally,
  or if another user is actively updating the job.  This command should only
  be used when the user is confident that they are not conflicting with another user.
  """
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  resp = api.cancel_update(job_key, config=config)
  check_and_log_response(resp)
Example #14
def start_cron(args, options):
  """usage: start_cron cluster/role/env/job

  Invokes a cron job immediately, out of its normal cron cycle.
  This does not affect the cron cycle in any way.
  """

  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  resp = api.start_cronjob(job_key, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
Example #15
def kill(args, options):
  """usage: kill cluster/role/env/job

  Kills a running job, blocking until all tasks have terminated.

  Default behaviour is to kill all shards in the job, but the kill
  can be limited to specific shards with the --shards option.
  """
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
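  # Note: this re-reads the global options and shadows the 'options' parameter.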
  options = app.get_options()
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  resp = api.kill_job(job_key, options.shards, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
Example #16
def kill(args, options):
  """usage: kill --shards=shardspec cluster/role/env/job

  Kills a group of tasks in a running job, blocking until all specified tasks have terminated.

  """
  if options.shards is None:
    print('Shards option is required for kill; use killall to kill all shards', file=sys.stderr)
    exit(1)
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  options = app.get_options()
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  resp = api.kill_job(job_key, options.shards, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
Example #17
def really_restart(args, options):
  if options.max_total_failures < 0:
    print("max_total_failures option must be >0, but you specified %s" % options.max_total_failures,
      file=sys.stderr)
    exit(1)
  maybe_disable_hooks(options)
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  updater_config = UpdaterConfig(
      options.batch_size,
      options.restart_threshold,
      options.watch_secs,
      options.max_per_shard_failures,
      options.max_total_failures)
  resp = api.restart(job_key, options.shards, updater_config,
      options.health_check_interval_seconds, config=config)
  check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
Example #18
def kill(args, options):
  """usage: kill --shards=shardspec cluster/role/env/job

  Kills a group of tasks in a running job, blocking until all specified tasks have terminated.

  """
  CoreCommandHook.run_hooks("kill", options, *args)
  maybe_disable_hooks(options)
  if options.shards is None:
    print('Shards option is required for kill; use killall to kill all shards', file=sys.stderr)
    exit(1)
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  options = app.get_options()
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  if options.batch_size is not None:
    kill_in_batches(api, job_key, options.shards, options.batch_size, options.max_failures_option)
  else:
    resp = api.kill_job(job_key, options.shards, config=config)
    check_and_log_response(resp)
  handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role, job_key.env, job_key.name)
  wait_kill_tasks(api.scheduler_proxy, job_key, options.shards)
Example #19
def really_restart(args, options):
    if options.max_total_failures < 0:
        print("max_total_failures option must be >0, but you specified %s" %
              options.max_total_failures,
              file=sys.stderr)
        exit(1)
    maybe_disable_hooks(options)
    api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
        args, options, make_client_factory())
    config = get_job_config(job_key.to_path(), config_file,
                            options) if config_file else None
    updater_config = UpdaterConfig(options.batch_size,
                                   options.restart_threshold,
                                   options.watch_secs,
                                   options.max_per_shard_failures,
                                   options.max_total_failures)
    resp = api.restart(job_key,
                       options.shards,
                       updater_config,
                       options.health_check_interval_seconds,
                       config=config)
    check_and_log_response(resp)
    handle_open(api.scheduler_proxy.scheduler_client().url, job_key.role,
                job_key.env, job_key.name)
Example #20
def really_cancel_update(args, options):
  api, job_key, config_file = LiveJobDisambiguator.disambiguate_args_or_die(
      args, options, make_client_factory())
  new_cmd = ["job", "restart"]
  instance_key = args[0]
  if options.shards is not None:
    instance_key += "/" + ",".join(map(str, option.shards))
  new_cmd.append(instance_key)
  if config_file is not None:
    new_cmd.append("--config=%s" % config_file)
  if options.batch_size != 1:
    new_cmd.append("--batch-size=%s" % options.batch_size)
  if options.max_per_shard_failures != 0:
    new_cmd.append("--max-per-shard-failures=%s" % options.max_per_shard_failures)
  if options.max_total_failures != 0:
    new_cmd.append("--max-total-failures=%s" % options.max_total_failures)
  if options.restart_threshold != 60:
    new_cmd.append("--restart-threshold=%s" % options.restart)
  if options.watch_secs != 30:
    new_cmd.append("--watch-secs=%s" % options.watch_secs)
  v1_deprecation_warning("update", new_cmd)
  config = get_job_config(job_key.to_path(), config_file, options) if config_file else None
  resp = api.cancel_update(job_key, config=config)
  check_and_log_response(resp)
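The != checks against 1, 0, 60 and 30 forward a flag only when it differs from what are presumably the v1 defaults. A standalone replay with stand-in values (illustrative, not taken from the source) makes that visible:

from types import SimpleNamespace

options = SimpleNamespace(shards=None, batch_size=1, max_per_shard_failures=0,
                          max_total_failures=2, restart_threshold=60, watch_secs=30)
new_cmd = ["job", "restart", "cluster/role/env/job"]  # stand-in instance key
if options.batch_size != 1:
  new_cmd.append("--batch-size=%s" % options.batch_size)
if options.max_per_shard_failures != 0:
  new_cmd.append("--max-per-shard-failures=%s" % options.max_per_shard_failures)
if options.max_total_failures != 0:
  new_cmd.append("--max-total-failures=%s" % options.max_total_failures)
if options.restart_threshold != 60:
  new_cmd.append("--restart-threshold=%s" % options.restart_threshold)
if options.watch_secs != 30:
  new_cmd.append("--watch-secs=%s" % options.watch_secs)
print(new_cmd)
# ['job', 'restart', 'cluster/role/env/job', '--max-total-failures=2']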
Example #21
 def test_disambiguate_args_or_die_unambiguous_with_no_config(self):
   expected = (self._api, AuroraJobKey(self.CLUSTER.name, self.ROLE, self.ENV, self.NAME), None)
   result = LiveJobDisambiguator.disambiguate_args_or_die([self.JOB_PATH], None,
       client_factory=lambda *_: self._api)
   assert result == expected
Example #22
 def test_ambiguous_property(self):
     assert LiveJobDisambiguator(self._api, self.ROLE, None,
                                 self.NAME).ambiguous
     assert not LiveJobDisambiguator(self._api, self.ROLE, self.ENV,
                                     self.NAME).ambiguous
Example #23
 def _try_disambiguate_ambiguous(self):
   return LiveJobDisambiguator._disambiguate_or_die(self._api, self.ROLE, None, self.NAME)
Example #24
 def _try_disambiguate_ambiguous(self):
     return LiveJobDisambiguator._disambiguate_or_die(
         self._api, self.ROLE, None, self.NAME)
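A test exercising this helper would presumably assert the die path. A sketch assuming pytest and that _disambiguate_or_die raises SystemExit when env is None (neither assumption is shown in these examples):

import pytest

 def test_try_disambiguate_ambiguous_dies(self):
     # Hypothetical companion test: assumes the *_or_die helper exits
     # via SystemExit when env is None (the ambiguous case).
     with pytest.raises(SystemExit):
         self._try_disambiguate_ambiguous()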