Example #1
def append_log(task_id):
    content = flask.request.get_data()
    task.append_log(redis, taskfile_dir, task_id, content, max_log_size)
    duration = flask.request.args.get('duration')
    try:
        if duration is not None:
            duration = int(duration)
    except ValueError:
        abort(flask.make_response(flask.jsonify(message="invalid duration value"), 400))
    try:
        task.beat(redis, task_id, duration, None)
    except Exception as e:
        abort(flask.make_response(flask.jsonify(message=str(e)), 400))
    return flask.jsonify(200)
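For context, here is a minimal client-side sketch of how such an endpoint could be exercised. The URL path, host, port, and HTTP method are placeholders (the route registration is not shown above), and the requests library is assumed to be available.

import requests

# Hypothetical URL; the actual @app.route path and HTTP method are not part of the example.
url = 'http://localhost:5000/task/log/task-123'

# Append a chunk of log output and report that the worker was active for 30 seconds.
r = requests.post(url, data=b'epoch 3: loss=1.42\n', params={'duration': 30})
print(r.status_code, r.json())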
Example #2
    def _advance_task(self, task_id):
        """Tries to advance the task to the next status. If it can, re-queue it immediately
        to process the next stage. Otherwise, re-queue it after some delay to try again.
        """
        keyt = 'task:%s' % task_id
        with self._redis.acquire_lock(keyt, acquire_timeout=1, expire_time=600):
            status = self._redis.hget(keyt, 'status')
            if status == 'stopped':
                return

            service_name = self._redis.hget(keyt, 'service')
            if service_name not in self._services:
                raise ValueError('unknown service %s' % service_name)
            service = self._services[service_name]

            self._logger.info('%s: trying to advance from status %s', task_id, status)

            if status == 'queued':
                resource = self._redis.hget(keyt, 'resource')
                parent = self._redis.hget(keyt, 'parent')
                if parent:
                    keyp = 'task:%s' % parent
                    # if the parent task is in the database, check for dependencies
                    if self._redis.exists(keyp):
                        status = self._redis.hget(keyp, 'status')
                        if status == 'stopped':
                            if self._redis.hget(keyp, 'message') != 'completed':
                                task.terminate(self._redis, task_id, phase='dependency_error')
                                return
                        else:
                            self._logger.warning('%s: depending on other task, waiting', task_id)
                            task.service_queue(self._redis, task_id, service.name)
                            return
                ngpus = int(self._redis.hget(keyt, 'ngpus'))
                resource, available_gpus = self._allocate_resource(task_id, resource, service, ngpus)
                if resource is not None:
                    self._logger.info('%s: resource %s reserved (%d/%d)',
                                      task_id, resource, available_gpus, ngpus)
                    self._redis.hset(keyt, 'alloc_resource', resource)
                    if ngpus == available_gpus:
                        task.set_status(self._redis, keyt, 'allocated')
                    else:
                        task.set_status(self._redis, keyt, 'allocating')
                    task.work_queue(self._redis, task_id, service_name)
                else:
                    self._logger.warning('%s: no resources available, waiting', task_id)
                    task.service_queue(self._redis, task_id, service.name)
            elif status == 'allocating':
                resource = self._redis.hget(keyt, 'alloc_resource')
                keyr = 'resource:%s:%s' % (service.name, resource)
                ngpus = int(self._redis.hget(keyt, 'ngpus'))
                already_allocated_gpus = 0
                for k, v in six.iteritems(self._redis.hgetall(keyr)):
                    if v == task_id:
                        already_allocated_gpus += 1
                capacity = service.list_resources()[resource]
                available_gpus, remaining_gpus = self._reserve_resource(service, resource,
                                                                        capacity, task_id,
                                                                        ngpus - already_allocated_gpus,
                                                                        0, -1, True)
                self._logger.warning(
                    'task: %s - resource: %s (capacity %d) - already %d - available %d',
                    task_id, resource, capacity, already_allocated_gpus, available_gpus)
                if available_gpus == ngpus - already_allocated_gpus:
                    task.set_status(self._redis, keyt, 'allocated')
                    key_reserved = 'reserved:%s:%s' % (service.name, resource)
                    self._redis.delete(key_reserved)
                    task.work_queue(self._redis, task_id, service.name)
                else:
                    task.work_queue(self._redis, task_id, service.name,
                                    delay=service.is_notifying_activity and 120 or 30)
            elif status == 'allocated':
                content = json.loads(self._redis.hget(keyt, 'content'))
                resource = self._redis.hget(keyt, 'alloc_resource')
                self._logger.info('%s: launching on %s', task_id, service.name)
                try:
                    keyr = 'resource:%s:%s' % (service.name, resource)
                    lgpu = []
                    for k, v in six.iteritems(self._redis.hgetall(keyr)):
                        if v == task_id:
                            lgpu.append(k)
                    self._redis.hset(keyt, 'alloc_lgpu', ",".join(lgpu))
                    data = service.launch(
                        task_id,
                        content['options'],
                        lgpu,
                        resource,
                        content['docker']['registry'],
                        content['docker']['image'],
                        content['docker']['tag'],
                        content['docker']['command'],
                        task.file_list(self._redis, task_id),
                        content['wait_after_launch'])
                except EnvironmentError as e:
                    # the resource is not available and will be set busy
                    self._block_resource(resource, service, str(e))
                    # set the task as queued again
                    self._redis.hdel(keyt, 'alloc_resource')
                    self._release_resource(service, resource, task_id)
                    task.set_status(self._redis, keyt, 'queued')
                    task.service_queue(self._redis, task_id, service.name)
                    self._logger.info('could not launch [%s] %s on %s: blocking resource', str(e), task_id, resource)
                    return
                except Exception as e:
                    # all other errors make the task fail
                    task.append_log(self._redis, task_id, str(e))
                    task.terminate(self._redis, task_id, phase='launch_error')
                    return
                self._logger.info('%s: task started on %s', task_id, service.name)
                self._redis.hset(keyt, 'job', json.dumps(data))
                task.set_status(self._redis, keyt, 'running')
                # For services that do not notify their activity, we should
                # poll the task status more regularly.
                task.work_queue(self._redis, task_id, service.name,
                                delay=service.is_notifying_activity and 120 or 30)

            elif status == 'running':
                self._logger.debug('- checking activity of task: %s', task_id)
                data = json.loads(self._redis.hget(keyt, 'job'))
                status = service.status(task_id, data)
                if status == 'dead':
                    self._logger.info('%s: task no longer running on %s, request termination',
                                      task_id, service.name)
                    task.terminate(self._redis, task_id, phase='exited')
                else:
                    task.work_queue(self._redis, task_id, service.name,
                                    delay=service.is_notifying_activity and 120 or 30)

            elif status == 'terminating':
                data = self._redis.hget(keyt, 'job')
                if data is not None:
                    container_id = self._redis.hget(keyt, 'container_id')
                    data = json.loads(data)
                    data['container_id'] = container_id
                    self._logger.info('%s: terminating task (%s)', task_id, json.dumps(data))
                    try:
                        service.terminate(data)
                        self._logger.info('%s: terminated', task_id)
                    except Exception:
                        self._logger.warning('%s: failed to terminate', task_id)
                resource = self._redis.hget(keyt, 'alloc_resource')
                self._release_resource(service, resource, task_id)
                task.set_status(self._redis, keyt, 'stopped')
                task.disable(self._redis, task_id)
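As a reading aid, the status transitions that _advance_task walks through can be summarized as follows. This map is only an illustration distilled from the branches above; no such structure exists in the original code.

# Illustrative summary of _advance_task's branches (not part of the original code).
TASK_LIFECYCLE = {
    'queued':      "reserve a resource -> 'allocated' or 'allocating', else re-queue on the service queue",
    'allocating':  "reserve the remaining GPUs -> 'allocated', else retry after a delay",
    'allocated':   "service.launch() -> 'running', or back to 'queued' on EnvironmentError",
    'running':     "poll service.status() -> terminate with phase 'exited' once the job is dead",
    'terminating': "service.terminate(), release the resource -> 'stopped'",
}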
Example #3
    def _handle_allocated_task(self, task_id):
        keyt = 'task:%s' % task_id
        _, service = self._get_service(keyt=keyt)
        content = json.loads(self._redis.hget(keyt, 'content'))
        resource = self._redis.hget(keyt, 'alloc_resource')
        self._logger.info('%s: launching on %s', task_id, service.name)
        try:
            entity_config = self._get_current_config(task_id)
            keygr = 'gpu_resource:%s:%s' % (service.name, resource)
            lgpu = []
            for k, v in six.iteritems(self._redis.hgetall(keygr)):
                if v == task_id:
                    lgpu.append(k)
            self._redis.hset(keyt, 'alloc_lgpu', ",".join(lgpu))
            keycr = 'cpu_resource:%s:%s' % (service.name, resource)
            lcpu = []
            for k, v in six.iteritems(self._redis.hgetall(keycr)):
                if v == task_id:
                    lcpu.append(k)
            self._redis.hset(keyt, 'alloc_lcpu', ",".join(lcpu))
            data = service.launch(
                task_id, content['options'], (lgpu, lcpu), resource,
                entity_config["storages"], entity_config["docker"],
                content['docker']['registry'], content['docker']['image'],
                content['docker']['tag'], content['docker']['command'],
                task.file_list(self._taskfile_dir, task_id),
                content['wait_after_launch'],
                self._redis.hget(keyt, 'token'),
                content.get('support_statistics'))
        except EnvironmentError as e:
            # the resource is not available and will be set busy
            self._block_resource(resource, service, str(e))
            self._redis.hdel(keyt, 'alloc_resource')
            # set the task as queued again
            self._release_resource(
                service, resource, task_id,
                Capacity(self._redis.hget(keyt, 'ngpus'),
                         self._redis.hget(keyt, 'ncpus')))
            status = self._redis.hget(keyt, 'status')
            if status == 'terminating':
                return None
            task.set_status(self._redis, keyt, 'queued')
            task.service_queue(self._redis, task_id, service.name)
            self._logger.info(
                'could not launch [%s] %s on %s: blocking resource', str(e),
                task_id, resource)
            self._logger.info(traceback.format_exc())
            return None
        except Exception as e:
            # all other errors make the task fail
            self._logger.info('fail task [%s] - %s', task_id, str(e))
            self._logger.info(traceback.format_exc())
            task.append_log(self._taskfile_dir, task_id, str(e))
            auth_token = self._redis.hget(keyt, 'token')
            callback_url = service._config.get('callback_url')
            if auth_token:
                callback_url = callback_url.replace(
                    "://", "://" + auth_token + ":x@")
            r = requests.get(
                os.path.join(callback_url, "task/terminate", task_id),
                params={'phase': 'launch_error'})
            if r.status_code != 200:
                raise RuntimeError(
                    'incorrect result from \'task/terminate\' service: %s' %
                    r.text) from e
            task.terminate(self._redis, task_id, phase='launch_error')
            self._logger.info(traceback.format_exc())
            return None
        self._logger.info('%s: task started on %s', task_id, service.name)
        self._redis.hset(keyt, 'job', json.dumps(data))
        status = self._redis.hget(keyt, 'status')
        if status == 'terminating':
            return None
        task.set_status(self._redis, keyt, 'running')
        # For services that do not notify their activity, we should
        # poll the task status more regularly.
        task.work_queue(self._redis,
                        task_id,
                        service.name,
                        delay=service.is_notifying_activity and 120 or 30)
        return None
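The Capacity value handed to _release_resource above is defined elsewhere in the project. As a rough stand-in for reading this example in isolation, and assuming it is essentially a (GPU count, CPU count) pair, it could be sketched as:

from collections import namedtuple

# Hypothetical stand-in for the project's Capacity type; the real definition lives
# elsewhere in the code base and may carry additional behaviour.
Capacity = namedtuple('Capacity', ['ngpus', 'ncpus'])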
Example #4
def append_log(task_id):
    content = flask.request.get_data()
    task.append_log(redis, task_id, content)
    return flask.jsonify(200)
Example #5
def append_log(task_id):
    content = flask.request.get_data()
    task.append_log(redis, taskfile_dir, task_id, content, max_log_size)
    return flask.jsonify(200)