Example #1
def run_with_class(process_class):
  with temporary_dir() as td:
    taskpath = TaskPath(root=td, task_id='task', process='process', run=0)
    sandbox = setup_sandbox(td, taskpath)
    with open(os.path.join(sandbox, 'silly_pants'), 'w') as silly_pants:
      p = process_class('process', 'echo test >&%s' % silly_pants.fileno(),
          0, taskpath, sandbox)
      p.start()
      return wait_for_rc(taskpath.getpath('process_checkpoint'))
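A caller would presumably exercise this helper with a concrete Process implementation and assert on the return code it reads back; a minimal sketch, assuming the TestProcess double used in the examples below:

def test_run_with_test_process():
  # A clean 'echo' should checkpoint a zero exit status.
  assert run_with_class(TestProcess) == 0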
Example #2
def test_simple_process_other_user(*args):
  with temporary_dir() as td:
    some_user = get_other_nonroot_user()
    taskpath = TaskPath(root=td, task_id='task', process='process', run=0)
    sandbox = setup_sandbox(td, taskpath)

    p = TestProcess('process', 'echo hello world', 0, taskpath, sandbox, user=some_user.pw_name)
    p.start()
    rc = wait_for_rc(taskpath.getpath('process_checkpoint'))

    # since we're not actually root, the best we can do is check the right things were attempted
    assert os.setgroups.calledwith([g.gr_gid for g in grp.getgrall() if some_user.pw_name in g.gr_mem])
    assert os.setgid.calledwith(some_user.pw_gid)
    assert os.setuid.calledwith(some_user.pw_uid)
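The *args signature suggests the test body runs under stacked mock decorators so the setuid family can be asserted on without real root privileges. A hedged sketch of that setup (the patch targets are assumptions, not taken from the source):

import mock

@mock.patch('os.setuid')
@mock.patch('os.setgid')
@mock.patch('os.setgroups')
def test_simple_process_other_user(*args):
  ...  # body as above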
Example #3
def test_simple_process():
  with temporary_dir() as td:
    taskpath = TaskPath(root=td, task_id='task', process='process', run=0)
    sandbox = setup_sandbox(td, taskpath)

    p = TestProcess('process', 'echo hello world', 0, taskpath, sandbox)
    p.start()
    rc = wait_for_rc(taskpath.getpath('process_checkpoint'))

    assert rc == 0
    stdout = taskpath.with_filename('stdout').getpath('process_logdir')
    assert os.path.exists(stdout)
    with open(stdout, 'r') as fp:
      assert fp.read() == 'hello world\n'
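These tests lean on a wait_for_rc helper that blocks until the process checkpoint records a return code. A minimal sketch under the assumption that the checkpoint is a stream of RunnerCkpt records readable with ThriftRecordReader (both appear in the runner code later in this section); the polling interval and timeout are illustrative:

import os
import time

def wait_for_rc(checkpoint, timeout=10.0):
  # Poll the checkpoint stream until a ProcessStatus carrying a
  # return_code shows up, then hand that code back to the caller.
  deadline = time.time() + timeout
  while time.time() < deadline:
    if os.path.exists(checkpoint):
      with open(checkpoint, 'rb') as fp:
        for record in ThriftRecordReader(fp, RunnerCkpt):
          status = record.process_status
          if status is not None and status.return_code is not None:
            return status.return_code
    time.sleep(0.1)
  raise AssertionError('Timed out waiting on checkpoint %s' % checkpoint)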
Example #4
def get(cls, task_id, checkpoint_root):
  """
    Get a TaskRunner bound to the task_id in checkpoint_root.
  """
  path = TaskPath(root=checkpoint_root, task_id=task_id, state='active')
  task_json = path.getpath('task_path')
  task_checkpoint = path.getpath('runner_checkpoint')
  if not os.path.exists(task_json):
    return None
  task = ThermosConfigLoader.load_json(task_json)
  if task is None:
    return None
  if len(task.tasks()) == 0:
    return None
  try:
    checkpoint = CheckpointDispatcher.from_file(task_checkpoint)
    if checkpoint is None or checkpoint.header is None:
      return None
    return cls(task.tasks()[0].task(), checkpoint_root, checkpoint.header.sandbox,
               log_dir=checkpoint.header.log_dir, task_id=task_id,
               portmap=checkpoint.header.ports)
  except Exception as e:
    log.error('Failed to reconstitute checkpoint in TaskRunner.get: %s' % e, exc_info=True)
    return None
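Because get returns None on any missing or unreadable checkpoint, callers can treat it as a simple lookup; a hedged usage sketch (the task id and checkpoint root are illustrative):

runner = TaskRunner.get('my_task-20150101-000000.000001', '/var/run/thermos')
if runner is None:
  print('no recoverable task under that id')
else:
  print('rebound to %s' % runner.task_id)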
Example #5
def test_log_permissions():
  with temporary_dir() as td:
    taskpath = TaskPath(root=td, task_id='task', process='process', run=0)
    sandbox = setup_sandbox(td, taskpath)

    p = TestProcess('process', 'echo hello world', 0, taskpath, sandbox)
    p.start()
    rc = wait_for_rc(taskpath.getpath('process_checkpoint'))

    stdout = taskpath.with_filename('stdout').getpath('process_logdir')
    stderr = taskpath.with_filename('stderr').getpath('process_logdir')
    assert os.path.exists(stdout)
    assert os.path.exists(stderr)
    assert os.stat(stdout).st_uid == os.getuid()
    assert os.stat(stderr).st_uid == os.getuid()
Example #6
def test_log_permissions_other_user(*mocks):
  with temporary_dir() as td:
    some_user = get_other_nonroot_user()
    taskpath = TaskPath(root=td, task_id='task', process='process', run=0)
    sandbox = setup_sandbox(td, taskpath)

    p = TestProcess('process', 'echo hello world', 0, taskpath, sandbox, user=some_user.pw_name)
    p.start()
    rc = wait_for_rc(taskpath.getpath('process_checkpoint'))

    # since we're not actually root, the best we can do is check the right things were attempted
    stdout = taskpath.with_filename('stdout').getpath('process_logdir')
    stderr = taskpath.with_filename('stderr').getpath('process_logdir')
    assert os.path.exists(stdout)
    assert os.path.exists(stderr)
    assert os.chown.calledwith(stdout, some_user.pw_uid, some_user.pw_gid)
    assert os.chown.calledwith(stderr, some_user.pw_uid, some_user.pw_gid)
Example #7
    def kill(cls, task_id, checkpoint_root, force=False, terminal_status=TaskState.KILLED, clock=time):
        """
      An implementation of Task killing that doesn't require a fully hydrated TaskRunner object.
      Terminal status must be either KILLED or LOST state.
    """
        if terminal_status not in (TaskState.KILLED, TaskState.LOST):
            raise cls.Error(
                "terminal_status must be KILLED or LOST (got %s)"
                % (TaskState._VALUES_TO_NAMES.get(terminal_status) or terminal_status)
            )
        pathspec = TaskPath(root=checkpoint_root, task_id=task_id)
        checkpoint = pathspec.getpath("runner_checkpoint")
        state = CheckpointDispatcher.from_file(checkpoint)

        if state is None or state.header is None or state.statuses is None:
            if force:
                log.error("Task has uninitialized TaskState - forcibly finalizing")
                cls.finalize_task(pathspec)
                return
            else:
                log.error("Cannot update states in uninitialized TaskState!")
                return

        ckpt = cls.open_checkpoint(checkpoint, force=force, state=state)

        def write_task_state(state):
            update = TaskStatus(
                state=state, timestamp_ms=int(clock.time() * 1000), runner_pid=os.getpid(), runner_uid=os.getuid()
            )
            ckpt.write(RunnerCkpt(task_status=update))

        def write_process_status(status):
            ckpt.write(RunnerCkpt(process_status=status))

        if cls.is_task_terminal(state.statuses[-1].state):
            log.info("Task is already in terminal state!  Finalizing.")
            cls.finalize_task(pathspec)
            return

        with closing(ckpt):
            write_task_state(TaskState.ACTIVE)
            for process, history in state.processes.items():
                process_status = history[-1]
                if not cls.is_process_terminal(process_status.state):
                    if cls.kill_process(state, process):
                        write_process_status(
                            ProcessStatus(
                                process=process,
                                state=ProcessState.KILLED,
                                seq=process_status.seq + 1,
                                return_code=-9,
                                stop_time=clock.time(),
                            )
                        )
                    else:
                        if process_status.state is not ProcessState.WAITING:
                            write_process_status(
                                ProcessStatus(process=process, state=ProcessState.LOST, seq=process_status.seq + 1)
                            )
            write_task_state(terminal_status)
        cls.finalize_task(pathspec)
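Assuming this classmethod lives on TaskRunnerHelper, as the imports in the later examples suggest, a hedged usage sketch:

# Forcibly mark a task LOST even if its checkpoint header was never initialized.
TaskRunnerHelper.kill('task', '/var/run/thermos', force=True,
                      terminal_status=TaskState.LOST)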
Example #8
    def kill(cls,
             task_id,
             checkpoint_root,
             force=False,
             terminal_status=TaskState.KILLED,
             clock=time):
        """
      An implementation of Task killing that doesn't require a fully hydrated TaskRunner object.
      Terminal status must be either KILLED or LOST state.
    """
        if terminal_status not in (TaskState.KILLED, TaskState.LOST):
            raise cls.Error('terminal_status must be KILLED or LOST (got %s)' %
                            (TaskState._VALUES_TO_NAMES.get(terminal_status) or
                             terminal_status))
        pathspec = TaskPath(root=checkpoint_root, task_id=task_id)
        checkpoint = pathspec.getpath('runner_checkpoint')
        state = CheckpointDispatcher.from_file(checkpoint)

        if state is None or state.header is None or state.statuses is None:
            if force:
                log.error(
                    'Task has uninitialized TaskState - forcibly finalizing')
                cls.finalize_task(pathspec)
                return
            else:
                log.error('Cannot update states in uninitialized TaskState!')
                return

        ckpt = cls.open_checkpoint(checkpoint, force=force, state=state)

        def write_task_state(state):
            update = TaskStatus(state=state,
                                timestamp_ms=int(clock.time() * 1000),
                                runner_pid=os.getpid(),
                                runner_uid=os.getuid())
            ckpt.write(RunnerCkpt(task_status=update))

        def write_process_status(status):
            ckpt.write(RunnerCkpt(process_status=status))

        if cls.is_task_terminal(state.statuses[-1].state):
            log.info('Task is already in terminal state!  Finalizing.')
            cls.finalize_task(pathspec)
            return

        with closing(ckpt):
            write_task_state(TaskState.ACTIVE)
            for process, history in state.processes.items():
                process_status = history[-1]
                if not cls.is_process_terminal(process_status.state):
                    if cls.kill_process(state, process):
                        write_process_status(
                            ProcessStatus(process=process,
                                          state=ProcessState.KILLED,
                                          seq=process_status.seq + 1,
                                          return_code=-9,
                                          stop_time=clock.time()))
                    else:
                        if process_status.state is not ProcessState.WAITING:
                            write_process_status(
                                ProcessStatus(process=process,
                                              state=ProcessState.LOST,
                                              seq=process_status.seq + 1))
            write_task_state(terminal_status)
        cls.finalize_task(pathspec)
Example #9
class Runner(object):
    RUN_JOB_SCRIPT = """
import os
import random
import sys
from twitter.common import log
from twitter.common.log.options import LogOptions
from twitter.thermos.config.loader import ThermosConfigLoader
from twitter.thermos.core.helper import TaskRunnerHelper
from twitter.thermos.core.runner import TaskRunner, TaskRunnerUniversalHandler
from thrift.TSerialization import serialize as thrift_serialize

random.seed(%(random_seed)d)

log.init('runner_base')
LogOptions.set_disk_log_level('DEBUG')

task = ThermosConfigLoader.load_json('%(filename)s')
task = task.tasks()[0].task

success_rate=%(success_rate)d

class AngryHandler(TaskRunnerUniversalHandler):
  def checkpoint(self, record):
    if not self._runner._recovery:
      if random.randint(0, 100) <= success_rate:
        super(AngryHandler, self).checkpoint(record)
      else:
        sys.exit(1)

sandbox = os.path.join('%(sandbox)s', '%(task_id)s')
args = {}
args['task_id'] = '%(task_id)s'
if %(portmap)s:
  args['portmap'] = %(portmap)s
args['universal_handler'] = AngryHandler

runner = TaskRunner(task, '%(root)s', sandbox, **args)
runner.run()

with open('%(state_filename)s', 'w') as fp:
  fp.write(thrift_serialize(runner.state))
"""

    def __init__(self, task, portmap={}, success_rate=100, random_seed=31337):
        """
      task = Thermos task
      portmap = port map
      success_rate = success rate of writing checkpoint to disk
    """
        self.task = task

        with temporary_file(cleanup=False) as fp:
            self.job_filename = fp.name
            fp.write(ThermosTaskWrapper(task).to_json())

        self.state_filename = tempfile.mktemp()
        self.tempdir = tempfile.mkdtemp()
        self.task_id = '%s-runner-base' % int(time.time() * 1000000)
        self.sandbox = os.path.join(self.tempdir, 'sandbox')
        self.portmap = portmap
        self.cleaned = False
        self.pathspec = TaskPath(root=self.tempdir, task_id=self.task_id)
        self.script_filename = None
        self.success_rate = success_rate
        self.random_seed = random_seed
        self._run_count = 0

    @property
    def pid(self):
        return self.po.pid

    @property
    def root(self):
        return self.tempdir

    def run(self):
        self._run_count += 1
        atexit.register(self.cleanup)

        if self.script_filename:
            os.unlink(self.script_filename)

        with temporary_file(cleanup=False) as fp:
            self.script_filename = fp.name
            fp.write(
                self.RUN_JOB_SCRIPT % {
                    'filename': self.job_filename,
                    'sandbox': self.sandbox,
                    'root': self.tempdir,
                    'task_id': self.task_id,
                    'state_filename': self.state_filename,
                    'portmap': repr(self.portmap),
                    'success_rate': self.success_rate,
                    'random_seed': self.random_seed + self._run_count,
                })

        with environment_as(PYTHONPATH=os.pathsep.join(sys.path)):
            self.po = subprocess.Popen([sys.executable, self.script_filename],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            try:
                so, se = self.po.communicate()
            except OSError as e:
                if e.errno == errno.ECHILD:
                    so = se = 'Killed'
                else:
                    raise

        rc = self.po.returncode
        if rc != 0:
            if os.path.exists(self.job_filename):
                config = open(self.job_filename).read()
            else:
                config = 'Nonexistent!'
            if 'THERMOS_DEBUG' in os.environ:
                print(
                    "Runner failed!\n\n\nconfig:%s\n\n\nstdout:%s\n\n\nstderr:%s\n\n\n"
                    % (config, so, se))

        try:
            with open(self.state_filename, 'r') as fp:
                self.state = thrift_deserialize(RunnerState(), fp.read())
        except Exception as e:
            if 'THERMOS_DEBUG' in os.environ:
                print('Failed to load Runner state: %s' % e, file=sys.stderr)
            self.state = RunnerState()

        try:
            self.reconstructed_state = CheckpointDispatcher.from_file(
                self.pathspec.getpath('runner_checkpoint'))
        except Exception:
            self.reconstructed_state = None
        self.initialized = True
        return rc

    def cleanup(self):
        if not self.cleaned:
            if hasattr(self, 'po'):
                try:
                    self.po.kill()
                except Exception:
                    pass
            os.unlink(self.job_filename)
            os.unlink(self.script_filename)
            if 'THERMOS_DEBUG' not in os.environ:
                shutil.rmtree(self.tempdir, ignore_errors=True)
            else:
                print('Logs saved in %s' % self.tempdir)
            self.cleaned = True
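A hedged sketch of driving this harness, assuming some_task is a valid Thermos task config; a sub-100 success_rate makes the child runner die partway through a checkpoint write, so a second run exercises recovery:

runner = Runner(some_task, success_rate=90)
rc = runner.run()
if rc != 0:
  # The child exited mid-checkpoint; rerunning resumes from the stream.
  rc = runner.run()
assert runner.initialized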
Example #10
class TaskRunner(object):
  """
    Run a ThermosTask.

    This class encapsulates the core logic to run and control the state of a Thermos task.
    Typically, it will be instantiated directly to control a new task, but a TaskRunner can also be
    synthesised from an existing task's checkpoint root.
  """
  class Error(Exception): pass
  class InvalidTask(Error): pass
  class InternalError(Error): pass
  class PermissionError(Error): pass
  class StateError(Error): pass

  # Maximum amount of time we spend waiting for new updates from the checkpoint streams
  # before doing housecleaning (checking for LOST tasks, dead PIDs.)
  MAX_ITERATION_TIME = Amount(10, Time.SECONDS)

  # Minimum amount of time we wait between polls for updates on coordinator checkpoints.
  COORDINATOR_INTERVAL_SLEEP = Amount(1, Time.SECONDS)

  # Amount of time we're willing to wait after forking before we expect the runner to have
  # exec'ed the child process.
  LOST_TIMEOUT = Amount(60, Time.SECONDS)

  # Active task stages
  STAGES = {
    TaskState.ACTIVE: TaskRunnerStage_ACTIVE,
    TaskState.CLEANING: TaskRunnerStage_CLEANING,
    TaskState.FINALIZING: TaskRunnerStage_FINALIZING
  }

  @classmethod
  def get(cls, task_id, checkpoint_root):
    """
      Get a TaskRunner bound to the task_id in checkpoint_root.
    """
    path = TaskPath(root=checkpoint_root, task_id=task_id, state='active')
    task_json = path.getpath('task_path')
    task_checkpoint = path.getpath('runner_checkpoint')
    if not os.path.exists(task_json):
      return None
    task = ThermosConfigLoader.load_json(task_json)
    if task is None:
      return None
    if len(task.tasks()) == 0:
      return None
    try:
      checkpoint = CheckpointDispatcher.from_file(task_checkpoint)
      if checkpoint is None or checkpoint.header is None:
        return None
      return cls(task.tasks()[0].task(), checkpoint_root, checkpoint.header.sandbox,
                 log_dir=checkpoint.header.log_dir, task_id=task_id,
                 portmap=checkpoint.header.ports)
    except Exception as e:
      log.error('Failed to reconstitute checkpoint in TaskRunner.get: %s' % e, exc_info=True)
      return None

  def __init__(self, task, checkpoint_root, sandbox, log_dir=None,
               task_id=None, portmap=None, user=None, chroot=False, clock=time,
               universal_handler=None, planner_class=TaskPlanner):
    """
      required:
        task (config.Task) = the task to run
        checkpoint_root (path) = the checkpoint root
        sandbox (path) = the sandbox in which the path will be run
                         [if None, cwd will be assumed, but garbage collection will be
                          disabled for this task.]

      optional:
        log_dir (string)  = directory to house stdout/stderr logs. If not specified, logs will be
                            written into the sandbox directory under .logs/
        task_id (string)  = bind to this task id.  if not specified, will synthesize an id based
                            upon task.name()
        portmap (dict)    = a map (string => integer) from name to port, e.g. { 'http': 80 }
        user (string)     = the user to run the task as.  if not current user, requires setuid
                            privileges.
        chroot (boolean)  = whether or not to chroot into the sandbox prior to exec.
        clock (time interface) = the clock to use throughout
        universal_handler = checkpoint record handler (only used for testing)
        planner_class (TaskPlanner class) = TaskPlanner class to use for constructing the task
                            planning policy.
    """
    if not issubclass(planner_class, TaskPlanner):
      raise TypeError('planner_class must be a TaskPlanner.')
    self._clock = clock
    launch_time = self._clock.time()
    launch_time_us = '%06d' % int((launch_time - int(launch_time)) * 10**6)  # fractional seconds, in microseconds
    if not task_id:
      self._task_id = '%s-%s.%s' % (task.name(),
                                    time.strftime('%Y%m%d-%H%M%S', time.localtime(launch_time)),
                                    launch_time_us)
    else:
      self._task_id = task_id
    current_user = TaskRunnerHelper.get_actual_user()
    self._user = user or current_user
    # TODO(wickman) This should be delegated to the ProcessPlatform / Helper
    if self._user != current_user:
      if os.geteuid() != 0:
        raise ValueError('task specifies user as %s, but %s does not have setuid permission!' % (
          self._user, current_user))
    self._portmap = portmap or {}
    self._launch_time = launch_time
    self._log_dir = log_dir or os.path.join(sandbox, '.logs')
    self._pathspec = TaskPath(root=checkpoint_root, task_id=self._task_id, log_dir=self._log_dir)
    try:
      ThermosTaskValidator.assert_valid_task(task)
      ThermosTaskValidator.assert_valid_ports(task, self._portmap)
    except ThermosTaskValidator.InvalidTaskError as e:
      raise self.InvalidTask('Invalid task: %s' % e)
    context = ThermosContext(
        task_id=self._task_id,
        ports=self._portmap,
        user=self._user)
    self._task, uninterp = (task % Environment(thermos=context)).interpolate()
    if len(uninterp) > 0:
      raise self.InvalidTask('Failed to interpolate task, missing: %s' %
          ', '.join(str(ref) for ref in uninterp))
    try:
      ThermosTaskValidator.assert_same_task(self._pathspec, self._task)
    except ThermosTaskValidator.InvalidTaskError as e:
      raise self.InvalidTask('Invalid task: %s' % e)
    self._plan = None # plan currently being executed (updated by Handlers)
    self._regular_plan = planner_class(self._task, clock=clock,
        process_filter=lambda proc: proc.final().get() == False)
    self._finalizing_plan = planner_class(self._task, clock=clock,
        process_filter=lambda proc: proc.final().get() == True)
    self._chroot = chroot
    self._sandbox = sandbox
    self._terminal_state = None
    self._ckpt = None
    self._process_map = dict((p.name().get(), p) for p in self._task.processes())
    self._task_processes = {}
    self._stages = dict((state, stage(self)) for state, stage in self.STAGES.items())
    self._finalization_start = None
    self._preemption_deadline = None
    self._watcher = ProcessMuxer(self._pathspec)
    self._state = RunnerState(processes={})

    # create runner state
    universal_handler = universal_handler or TaskRunnerUniversalHandler
    self._dispatcher = CheckpointDispatcher()
    self._dispatcher.register_handler(universal_handler(self))
    self._dispatcher.register_handler(TaskRunnerProcessHandler(self))
    self._dispatcher.register_handler(TaskRunnerTaskHandler(self))

    # recover checkpointed runner state and update plan
    self._recovery = True
    self._replay_runner_ckpt()

  @property
  def task(self):
    return self._task

  @property
  def task_id(self):
    return self._task_id

  @property
  def state(self):
    return self._state

  @property
  def processes(self):
    return self._task_processes

  def task_state(self):
    return self._state.statuses[-1].state if self._state.statuses else TaskState.ACTIVE

  def close_ckpt(self):
    """Force close the checkpoint stream.  This is necessary for runners terminated through
       exception propagation."""
    log.debug('Closing the checkpoint stream.')
    self._ckpt.close()

  @contextmanager
  def control(self, force=False):
    """
      Bind to the checkpoint associated with this task, position to the end of the log if
      it exists, or create it if it doesn't.  Fails if we cannot get "leadership" i.e. a
      file lock on the checkpoint stream.
    """
    if self.is_terminal():
      raise TaskRunner.StateError('Cannot take control of a task in terminal state.')
    if self._sandbox:
      safe_mkdir(self._sandbox)
    ckpt_file = self._pathspec.getpath('runner_checkpoint')
    try:
      self._ckpt = TaskRunnerHelper.open_checkpoint(ckpt_file, force=force, state=self._state)
    except TaskRunnerHelper.PermissionError:
      raise TaskRunner.PermissionError('Unable to open checkpoint %s' % ckpt_file)
    log.debug('Flipping recovery mode off.')
    self._recovery = False
    self._set_task_status(self.task_state())
    self._resume_task()
    try:
      yield
    except Exception as e:
      log.error('Caught exception in self.control(): %s' % e)
      log.error('  %s' % traceback.format_exc())
    self._ckpt.close()

  def _resume_task(self):
    assert self._ckpt is not None
    unapplied_updates = self._replay_process_ckpts()
    if self.is_terminal():
      raise self.StateError('Cannot resume terminal task.')
    self._initialize_ckpt_header()
    self._replay(unapplied_updates)

  def _ckpt_write(self, record):
    """
      Write to the checkpoint stream if we're not in recovery mode.
    """
    if not self._recovery:
      self._ckpt.write(record)

  def _replay(self, checkpoints):
    """
      Replay a sequence of RunnerCkpts.
    """
    for checkpoint in checkpoints:
      self._dispatcher.dispatch(self._state, checkpoint)

  def _replay_runner_ckpt(self):
    """
      Replay the checkpoint stream associated with this task.
    """
    ckpt_file = self._pathspec.getpath('runner_checkpoint')
    if os.path.exists(ckpt_file):
      fp = open(ckpt_file, 'r')
      ckpt_recover = ThriftRecordReader(fp, RunnerCkpt)
      for record in ckpt_recover:
        log.debug('Replaying runner checkpoint record: %s' % record)
        self._dispatcher.dispatch(self._state, record, recovery=True)
      ckpt_recover.close()

  def _replay_process_ckpts(self):
    """
      Replay the unmutating process checkpoints.  Return the unapplied process updates that
      would mutate the runner checkpoint stream.
    """
    process_updates = self._watcher.select()
    unapplied_process_updates = []
    for process_update in process_updates:
      if self._dispatcher.would_update(self._state, process_update):
        unapplied_process_updates.append(process_update)
      else:
        self._dispatcher.dispatch(self._state, process_update, recovery=True)
    return unapplied_process_updates

  def _initialize_ckpt_header(self):
    """
      Initializes the RunnerHeader for this checkpoint stream if it has not already
      been constructed.
    """
    if self._state.header is None:
      header = RunnerHeader(
        task_id=self._task_id,
        launch_time_ms=int(self._launch_time*1000),
        sandbox=self._sandbox,
        log_dir=self._log_dir,
        hostname=socket.gethostname(),
        user=self._user,
        ports=self._portmap)
      runner_ckpt = RunnerCkpt(runner_header=header)
      self._dispatcher.dispatch(self._state, runner_ckpt)

  def _set_task_status(self, state):
    update = TaskStatus(state=state, timestamp_ms=int(self._clock.time() * 1000),
                        runner_pid=os.getpid(), runner_uid=os.getuid())
    runner_ckpt = RunnerCkpt(task_status=update)
    self._dispatcher.dispatch(self._state, runner_ckpt, self._recovery)

  def _finalization_remaining(self):
    # If a preemption deadline has been set, use that.
    if self._preemption_deadline:
      return max(0, self._preemption_deadline - self._clock.time())

    # Otherwise, use the finalization wait provided in the configuration.
    finalization_allocation = self.task.finalization_wait().get()
    if self._finalization_start is None:
      return sys.float_info.max
    else:
      waited = max(0, self._clock.time() - self._finalization_start)
      return max(0, finalization_allocation - waited)

  def _set_process_status(self, process_name, process_state, **kw):
    if 'sequence_number' in kw:
      sequence_number = kw.pop('sequence_number')
      log.debug('_set_process_status(%s <= %s, seq=%s[force])' % (process_name,
        ProcessState._VALUES_TO_NAMES.get(process_state), sequence_number))
    else:
      current_run = self._current_process_run(process_name)
      if not current_run:
        assert process_state == ProcessState.WAITING
        sequence_number = 0
      else:
        sequence_number = current_run.seq + 1
      log.debug('_set_process_status(%s <= %s, seq=%s[auto])' % (process_name,
        ProcessState._VALUES_TO_NAMES.get(process_state), sequence_number))
    runner_ckpt = RunnerCkpt(process_status=ProcessStatus(
      process=process_name, state=process_state, seq=sequence_number, **kw))
    self._dispatcher.dispatch(self._state, runner_ckpt, self._recovery)

  def _task_process_from_process_name(self, process_name, sequence_number):
    """
      Construct a Process() object from a process_name, populated with its
      correct run number and fully interpolated commandline.
    """
    run_number = len(self.state.processes[process_name]) - 1
    pathspec = self._pathspec.given(process=process_name, run=run_number)
    process = self._process_map.get(process_name)
    if process is None:
      raise self.InternalError('FATAL: Could not find process: %s' % process_name)
    def close_ckpt_and_fork():
      pid = os.fork()
      if pid == 0 and self._ckpt is not None:
        self._ckpt.close()
      return pid
    return Process(
      process.name().get(),
      process.cmdline().get(),
      sequence_number,
      pathspec,
      self._sandbox,
      self._user,
      chroot=self._chroot,
      fork=close_ckpt_and_fork)

  def deadlocked(self, plan=None):
    """Check whether a plan is deadlocked, i.e. there are no running/runnable processes, and the
    plan is not complete."""
    plan = plan or self._regular_plan
    now = self._clock.time()
    running = list(plan.running)
    runnable = list(plan.runnable_at(now))
    waiting = list(plan.waiting_at(now))
    log.debug('running:%d runnable:%d waiting:%d complete:%s' % (
      len(running), len(runnable), len(waiting), plan.is_complete()))
    return len(running + runnable + waiting) == 0 and not plan.is_complete()

  def is_healthy(self):
    """Check whether the TaskRunner is healthy. A healthy TaskRunner is not deadlocked and has not
    reached its max_failures count."""
    max_failures = self._task.max_failures().get()
    deadlocked = self.deadlocked()
    under_failure_limit = max_failures == 0 or len(self._regular_plan.failed) < max_failures
    log.debug('max_failures:%d failed:%d under_failure_limit:%s deadlocked:%s ==> health:%s' % (
      max_failures, len(self._regular_plan.failed), under_failure_limit, deadlocked,
      not deadlocked and under_failure_limit))
    return not deadlocked and under_failure_limit

  def _current_process_run(self, process_name):
    if process_name not in self._state.processes or len(self._state.processes[process_name]) == 0:
      return None
    return self._state.processes[process_name][-1]

  def is_process_lost(self, process_name):
    """Determine whether or not we should mark a task as LOST and do so if necessary."""
    current_run = self._current_process_run(process_name)
    if not current_run:
      raise self.InternalError('No current_run for process %s!' % process_name)

    def forked_but_never_came_up():
      return current_run.state == ProcessState.FORKED and (
        self._clock.time() - current_run.fork_time > TaskRunner.LOST_TIMEOUT.as_(Time.SECONDS))

    def running_but_coordinator_died():
      if current_run.state != ProcessState.RUNNING:
        return False
      coordinator_pid, _, _ = TaskRunnerHelper.scan_process(self.state, process_name)
      if coordinator_pid is not None:
        return False
      elif self._watcher.has_data(process_name):
        return False
      return True

    if forked_but_never_came_up() or running_but_coordinator_died():
      log.info('Detected a LOST task: %s' % current_run)
      log.debug('  forked_but_never_came_up: %s' % forked_but_never_came_up())
      log.debug('  running_but_coordinator_died: %s' % running_but_coordinator_died())
      return True

    return False

  def _run_plan(self, plan):
    log.debug('Schedule pass:')

    running = list(plan.running)
    log.debug('running: %s' % ' '.join(running))
    log.debug('finished: %s' % ' '.join(plan.finished))

    launched = []
    for process_name in plan.running:
      if self.is_process_lost(process_name):
        self._set_process_status(process_name, ProcessState.LOST)

    now = self._clock.time()
    runnable = list(plan.runnable_at(now))
    waiting = list(plan.waiting_at(now))
    log.debug('runnable: %s' % ' '.join(runnable))
    log.debug('waiting: %s' % ' '.join(
        '%s[T-%.1fs]' % (process, plan.get_wait(process)) for process in waiting))

    def pick_processes(process_list):
      if self._task.max_concurrency().get() == 0:
        return process_list
      num_to_pick = max(self._task.max_concurrency().get() - len(running), 0)
      return process_list[:num_to_pick]

    for process_name in pick_processes(runnable):
      tp = self._task_processes.get(process_name)
      if tp:
        current_run = self._current_process_run(process_name)
        assert current_run.state == ProcessState.WAITING
      else:
        self._set_process_status(process_name, ProcessState.WAITING)
        tp = self._task_processes[process_name]
      log.info('Forking Process(%s)' % process_name)
      tp.start()
      launched.append(tp)

    return len(launched) > 0

  def _terminate_plan(self, plan):
    for process in plan.running:
      last_run = self._current_process_run(process)
      if last_run and last_run.state in (ProcessState.FORKED, ProcessState.RUNNING):
        TaskRunnerHelper.terminate_process(self.state, process)

  def has_running_processes(self):
    """
      Returns True if any processes associated with this task have active pids.
    """
    process_tree = TaskRunnerHelper.scantree(self.state)
    return any(any(process_set) for process_set in process_tree.values())

  def has_active_processes(self):
    """
      Returns True if any processes are in non-terminal states.
    """
    return any(not TaskRunnerHelper.is_process_terminal(run.state) for run in
        filter(None, (self._current_process_run(process) for process in self.state.processes)))

  def collect_updates(self, timeout=None):
    """
      Collects and applies updates from process checkpoint streams.  Returns the number
      of applied process checkpoints.
    """
    if self.has_active_processes():
      sleep_interval = self.COORDINATOR_INTERVAL_SLEEP.as_(Time.SECONDS)
      total_time = 0.0
      while True:
        process_updates = self._watcher.select()
        for process_update in process_updates:
          self._dispatcher.dispatch(self._state, process_update, self._recovery)
        if process_updates:
          return len(process_updates)
        if timeout and total_time >= timeout:
          break
        total_time += sleep_interval
        self._clock.sleep(sleep_interval)
    return 0

  def is_terminal(self):
    return TaskRunnerHelper.is_task_terminal(self.task_state())

  def terminal_state(self):
    if self._terminal_state:
      log.debug('Forced terminal state: %s' %
          TaskState._VALUES_TO_NAMES.get(self._terminal_state, 'UNKNOWN'))
      return self._terminal_state
    else:
      return TaskState.SUCCESS if self.is_healthy() else TaskState.FAILED

  def run(self, force=False):
    """
      Entrypoint to runner. Assume control of checkpoint stream, and execute TaskRunnerStages
      until runner is terminal.
    """
    if self.is_terminal():
      return
    with self.control(force):
      self._run()

  def _run(self):
    iteration_time = self.MAX_ITERATION_TIME.as_(Time.SECONDS)
    while not self.is_terminal():
      start = self._clock.time()
      # step 1: execute stage corresponding to the state we're currently in
      runner = self._stages[self.task_state()]
      iteration_wait = runner.run()
      if iteration_wait is None:
        log.debug('Run loop: No more work to be done in state %s' %
            TaskState._VALUES_TO_NAMES.get(self.task_state(), 'UNKNOWN'))
        self._set_task_status(runner.transition_to())
        continue
      log.debug('Run loop: Work to be done within %.1fs' % iteration_wait)
      # step 2: check child process checkpoint streams for updates
      if not self.collect_updates(iteration_wait):
        # If we don't collect any updates, at least 'touch' the checkpoint stream
        # so as to prevent garbage collection.
        elapsed = self._clock.time() - start
        if elapsed < iteration_wait:
          log.debug('Update collection only took %.1fs, idling %.1fs' % (
              elapsed, iteration_wait - elapsed))
          self._clock.sleep(iteration_wait - elapsed)
        log.debug('Run loop: No updates collected, touching checkpoint.')
        os.utime(self._pathspec.getpath('runner_checkpoint'), None)
      # step 3: reap any zombie child processes
      TaskRunnerHelper.reap_children()

  def kill(self, force=False, terminal_status=TaskState.KILLED,
           preemption_wait=Amount(1, Time.MINUTES)):
    """
      Kill all processes associated with this task and set task/process states as terminal_status
      (defaults to KILLED)
    """
    log.debug('Runner issued kill: force:%s, preemption_wait:%s' % (
      force, preemption_wait))
    assert terminal_status in (TaskState.KILLED, TaskState.LOST)
    self._preemption_deadline = self._clock.time() + preemption_wait.as_(Time.SECONDS)
    with self.control(force):
      if self.is_terminal():
        log.warning('Task is not in ACTIVE state, cannot issue kill.')
        return
      self._terminal_state = terminal_status
      if self.task_state() == TaskState.ACTIVE:
        self._set_task_status(TaskState.CLEANING)
      self._run()

  def lose(self, force=False):
    """
      Mark a task as LOST and kill any straggling processes.
    """
    self.kill(force, preemption_wait=Amount(0, Time.SECONDS), terminal_status=TaskState.LOST)

  def _kill(self):
    processes = TaskRunnerHelper.scantree(self._state)
    for process, pid_tuple in processes.items():
      current_run = self._current_process_run(process)
      coordinator_pid, pid, tree = pid_tuple
      if TaskRunnerHelper.is_process_terminal(current_run.state):
        if coordinator_pid or pid or tree:
          log.warning('Terminal process (%s) still has running pids:' % process)
          log.warning('  coordinator_pid: %s' % coordinator_pid)
          log.warning('              pid: %s' % pid)
          log.warning('             tree: %s' % tree)
        TaskRunnerHelper.kill_process(self.state, process)
      else:
        if coordinator_pid or pid or tree:
          log.info('Transitioning %s to KILLED' % process)
          self._set_process_status(process, ProcessState.KILLED,
            stop_time=self._clock.time(), return_code=-1)
        else:
          log.info('Transitioning %s to LOST' % process)
          if current_run.state != ProcessState.WAITING:
            self._set_process_status(process, ProcessState.LOST)
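To tie the public surface together, a hedged end-to-end sketch (the task object and paths are assumptions): construct a runner, drive it to a terminal state, then rebind by task id and force-kill a hypothetical survivor.

runner = TaskRunner(task, '/var/run/thermos', '/var/run/thermos/sandbox',
                    portmap={'http': 31337})
runner.run()  # blocks until the task reaches a terminal state

survivor = TaskRunner.get(runner.task_id, '/var/run/thermos')
if survivor is not None and not survivor.is_terminal():
  survivor.kill(force=True)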
Example #11
class Runner(object):
  RUN_JOB_SCRIPT = """
import os
import random
import sys
from twitter.common import log
from twitter.common.log.options import LogOptions
from twitter.thermos.config.loader import ThermosConfigLoader
from twitter.thermos.core.helper import TaskRunnerHelper
from twitter.thermos.core.runner import TaskRunner, TaskRunnerUniversalHandler
from thrift.TSerialization import serialize as thrift_serialize

random.seed(%(random_seed)d)

log.init('runner_base')
LogOptions.set_disk_log_level('DEBUG')

task = ThermosConfigLoader.load_json('%(filename)s')
task = task.tasks()[0].task

success_rate=%(success_rate)d

class AngryHandler(TaskRunnerUniversalHandler):
  def checkpoint(self, record):
    if not self._runner._recovery:
      if random.randint(0, 100) <= success_rate:
        super(AngryHandler, self).checkpoint(record)
      else:
        sys.exit(1)

sandbox = os.path.join('%(sandbox)s', '%(task_id)s')
args = {}
args['task_id'] = '%(task_id)s'
if %(portmap)s:
  args['portmap'] = %(portmap)s
args['universal_handler'] = AngryHandler

runner = TaskRunner(task, '%(root)s', sandbox, **args)
runner.run()

with open('%(state_filename)s', 'w') as fp:
  fp.write(thrift_serialize(runner.state))
"""

  def __init__(self, task, portmap={}, success_rate=100, random_seed=31337):
    """
      task = Thermos task
      portmap = port map
      success_rate = success rate of writing checkpoint to disk
    """
    self.task = task

    with temporary_file(cleanup=False) as fp:
      self.job_filename = fp.name
      fp.write(ThermosTaskWrapper(task).to_json())

    self.state_filename = tempfile.mktemp()
    self.tempdir = tempfile.mkdtemp()
    self.task_id = '%s-runner-base' % int(time.time() * 1000000)
    self.sandbox = os.path.join(self.tempdir, 'sandbox')
    self.portmap = portmap
    self.cleaned = False
    self.pathspec = TaskPath(root=self.tempdir, task_id=self.task_id)
    self.script_filename = None
    self.success_rate = success_rate
    self.random_seed = random_seed
    self._run_count = 0

  @property
  def pid(self):
    return self.po.pid

  @property
  def root(self):
    return self.tempdir

  def run(self):
    self._run_count += 1
    atexit.register(self.cleanup)

    if self.script_filename:
      os.unlink(self.script_filename)

    with temporary_file(cleanup=False) as fp:
      self.script_filename = fp.name
      fp.write(self.RUN_JOB_SCRIPT % {
        'filename': self.job_filename,
        'sandbox': self.sandbox,
        'root': self.tempdir,
        'task_id': self.task_id,
        'state_filename': self.state_filename,
        'portmap': repr(self.portmap),
        'success_rate': self.success_rate,
        'random_seed': self.random_seed + self._run_count,
      })

    with environment_as(PYTHONPATH=os.pathsep.join(sys.path)):
      self.po = subprocess.Popen([sys.executable, self.script_filename],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      try:
        so, se = self.po.communicate()
      except OSError as e:
        if e.errno == errno.ECHILD:
          so = se = 'Killed'
        else:
          raise

    rc = self.po.returncode
    if rc != 0:
      if os.path.exists(self.job_filename):
        config = open(self.job_filename).read()
      else:
        config = 'Nonexistent!'
      if 'THERMOS_DEBUG' in os.environ:
        print("Runner failed!\n\n\nconfig:%s\n\n\nstdout:%s\n\n\nstderr:%s\n\n\n" % (
            config, so, se))

    try:
      with open(self.state_filename, 'r') as fp:
        self.state = thrift_deserialize(RunnerState(), fp.read())
    except Exception as e:
      if 'THERMOS_DEBUG' in os.environ:
        print('Failed to load Runner state: %s' % e, file=sys.stderr)
      self.state = RunnerState()

    try:
      self.reconstructed_state = CheckpointDispatcher.from_file(
        self.pathspec.getpath('runner_checkpoint'))
    except Exception:
      self.reconstructed_state = None
    self.initialized = True
    return rc

  def cleanup(self):
    if not self.cleaned:
      if hasattr(self, 'po'):
        try:
          self.po.kill()
        except Exception:
          pass
      os.unlink(self.job_filename)
      os.unlink(self.script_filename)
      if 'THERMOS_DEBUG' not in os.environ:
        shutil.rmtree(self.tempdir, ignore_errors=True)
      else:
        print('Logs saved in %s' % self.tempdir)
      self.cleaned = True