def task_status(id):
    """Return the stored status for task *id*, or abort with 404 if unknown."""
    with Database() as db:
        status = db.select_status(id)
    # no row for this id means the task was never created
    if status is None:
        abort(404)
    return status
def run(self):
    """Worker main loop: claim and process tasks until told to exit.

    SIGTERM/SIGINT are ignored here; the parent process signals shutdown
    through the ``self.exit`` event instead.
    """
    # ignore term and int signals, use shutdown function to quit
    signal(SIGTERM, ignore_signal)
    signal(SIGINT, ignore_signal)
    # connect to the database
    with Database() as db:
        # keep processing until told to stop
        while not self.exit.is_set():
            # claim the next task under the lock to prevent duplicate
            # processing; the context manager guarantees release even if
            # get_next_task() raises (bare acquire/release did not).
            # NOTE(review): assumes self.lock supports the context-manager
            # protocol, as threading/multiprocessing locks do — confirm.
            with self.lock:
                task = db.get_next_task()
            # nothing queued: back off briefly, then poll again
            if task is None:
                time.sleep(1)
                continue
            try:
                # get the plugin for this task
                plugin = plugins[task.plugin]
                # determine timeout: per-lock-type override, else default
                timeout = config.getint("lock_type_default", "timeout")
                section = "lock_type_{}".format(task.lock_type)
                if config.has_option(section, "timeout"):
                    timeout = config.getint(section, "timeout")
                timeout_time = task.locked_at + timedelta(seconds=timeout)
                # check if task has timed out
                if datetime.now() > timeout_time:
                    logging.info("task {} lock timed out".format(task.id))
                    plugin.on_lock_timeout(task)
                    continue
                # run the task
                status = plugin.process(task)
                # a plugin that returns no status is treated as complete
                if status is None:
                    task.complete()
            # report plugin failure
            except Exception as e:
                task.fail(str(e))
                traceback.print_exc()
def lock_acquire(lock_type):
    """Create a new lock task of *lock_type* and return its generated id."""
    logging.debug("request for new {} lock".format(lock_type))
    # calculate unique id
    # TODO: loop until we get unique id?
    logging.debug("calculating task id")
    hasher = md5()
    hasher.update("{}{}".format(lock_type, datetime.now()).encode('utf-8'))
    task_id = hasher.hexdigest().upper()
    logging.debug("task id = {}".format(task_id))
    # create new task
    with Database() as db:
        db.insert_task(task_id, "lock", lock_type)
    # return task id
    return task_id
def task_create(plugin):
    """Create a task for *plugin* from the posted JSON and return its id.

    The id is the MD5 of the plugin name plus the serialized request
    payload, so identical requests map to the same task directory.
    Aborts with 404 if the plugin is not registered.
    """
    logging.debug("request for new {} task".format(plugin))
    # make sure plugin exists
    if plugin not in plugins:
        logging.error("plugin not found: {}".format(plugin))
        abort(404)
    # get posted json
    logging.debug("loading request json")
    request_json = request.get_json(force=True)
    # calculate task id
    logging.debug("calculating task id")
    md5_hasher = md5()
    md5_hasher.update(plugin.encode('utf-8'))
    md5_hasher.update(json.dumps(request_json).encode('utf-8'))
    id = md5_hasher.hexdigest().upper()
    logging.debug("task id = {}".format(id))
    # create the task dir (shard prefix + full id) in one call;
    # makedirs(exist_ok=True) replaces the original's two
    # isdir-check-then-mkdir steps, which raced with concurrent requests
    task_dir = os.path.join("tasks", id[0:2], id)
    os.makedirs(task_dir, exist_ok=True)
    # store request json in task dir
    request_file = os.path.join(task_dir, 'request.json')
    if not os.path.isfile(request_file):
        with open(request_file, "w") as fp:
            json.dump(request_json, fp)
    lock_type = plugins[plugin].lock_type()
    # create new task
    with Database() as db:
        db.insert_task(id, plugin, lock_type)
    # return task id
    return id
def display_stats():
    """Return all database statistics as newline-terminated "name: value" lines."""
    with Database() as db:
        # join once instead of the original quadratic string += loop
        return "".join(
            "{}: {}\n".format(stat[0], stat[1])
            for stat in db.select_statistics()
        )
def lock_release(id):
    """Mark lock task *id* as complete; respond with an empty body."""
    # complete the task
    with Database() as db:
        db.complete_task(id)
    return ""
def lock_keep_alive(id):
    """Refresh the keep-alive on lock task *id*; respond with an empty body."""
    with Database() as db:
        # called for its side effect; the original bound the return value
        # to an unused local, which is dropped here
        db.lock_keep_alive(id)
    return ""
os.dup2(0, 1) os.dup2(0, 2) shutdown = False def handler(signum, frame): global shutdown shutdown = True signal(SIGTERM, handler) signal(SIGINT, handler) try: with Database() as db: # requeue any tasks that were left in the running state db.queue_running_tasks() # spawn configured number of threads for index in range(config.getint("chronos", "num_workers")): logging.info("starting worker") spawn_worker() # manage locks while not shutdown: try: locks = db.select_locks() if not locks: time.sleep(1) continue