import datetime
import json
import logging
import subprocess

import croniter
import pytz
import requests

import celeryconfig
import config
import utils

# assumption: a standard module-level logger; the original module's logger
# setup is not shown in this listing
logger = logging.getLogger(__name__)


def run_scheduler(lock_key):
    '''Scheduler function for sending async tasks'''
    if config.stdlogs:
        logger.warning("running scheduler with key " + lock_key)
    rds = utils.redis_connect()
    # process scheduler lock
    slock = utils.convert2str(rds.get(config.scheduler_lock))
    # it is locked by another scheduler process
    if slock is not None and str(slock) != lock_key:
        if config.stdlogs:
            logger.warning("exiting scheduler. locked by " + slock)
        return
    # add or update lock and set new expiration
    if config.stdlogs:
        logger.warning("updating scheduler lock " + lock_key)
    rds.set(config.scheduler_lock, lock_key)
    rds.expire(config.scheduler_lock, config.scheduler_locktimer)
    # set now, truncated to the minute, in the Celery timezone
    tz = pytz.timezone(celeryconfig.timezone)
    now = tz.localize(datetime.datetime.now())
    now = now.replace(second=0, microsecond=0)
    now_str = now.strftime("%Y-%m-%d %H:%M:%S")
    # process tasks
    for key in rds.scan_iter(config.scheduler_jobs + ":*"):
        key = key.decode()
        job = utils.convert2str(rds.hgetall(key))
        # skip if we have already created the job this minute
        if job["last_run"] == now_str:
            if config.stdlogs:
                logger.warning("skip sent job")
            continue
        # a job that has never run: just update last_run and go to next job
        if len(job["last_run"]) == 0:
            rds.hset(key, "last_run", now_str)
            continue
        # find the next run time
        job["last_run"] = tz.localize(
            datetime.datetime.strptime(job["last_run"], "%Y-%m-%d %H:%M:%S"))
        next_run = croniter.croniter(
            job["schedule"], job["last_run"]).get_next(datetime.datetime)
        next_run = next_run.replace(second=0, microsecond=0)
        # fast-forward past any missed runs so only the current minute fires
        while next_run < now:
            next_run = croniter.croniter(
                job["schedule"], next_run).get_next(datetime.datetime)
        # only create the job if the next run is due this very minute
        if next_run.strftime("%Y-%m-%d %H:%M:00") != now_str:
            continue
        try:
            # create async task to run once immediately
            r = requests.post(config.unicron_api + "/async/",
                              json=utils.convert2str(
                                  json.loads(utils.decrypt(job["info"]))))
            if config.stdlogs:
                logger.warning("job scheduled: " + utils.convert2str(r.text))
            # update last_run and run_count of the job
            rds.hset(key, "last_run", now_str)
            rds.hincrby(key, "run_count")
        except Exception as e:
            if config.stdlogs:
                logger.warning("request error " + str(e))
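

# Hypothetical usage sketch, not part of the original listing: generate one
# lock key per process and call run_scheduler from a periodic task (e.g. a
# Celery beat entry firing every minute). With a per-process key, the worker
# holding config.scheduler_lock keeps renewing its own lock, while other
# workers see a foreign key and return early.
#
# import uuid
# SCHEDULER_LOCK_KEY = uuid.uuid4().hex
#
# @app.task  # `app` is an assumed, already-configured Celery instance
# def scheduler_tick():
#     run_scheduler(SCHEDULER_LOCK_KEY)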


def getAll(self):
    '''Get all scheduled jobs'''
    jobs = {}
    rds = utils.redis_connect()
    for key in rds.scan_iter(config.scheduler_jobs + ":*"):
        key = key.decode()
        job = utils.convert2str(rds.hgetall(key))
        if "info" not in job:
            continue
        # info saved as json in Redis
        job["info"] = utils.convert2str(
            json.loads(utils.decrypt(job["info"])))
        jobs[utils.remove_prefix(key, config.scheduler_jobs + ":")] = job
    return jobs


def get(self, task_id):
    '''Get status and result for one async task on Redis'''
    rds = utils.redis_connect()
    res = rds.get(config.celery_tasks + task_id)
    if res is None:
        return None
    return json.loads(utils.convert2str(res))
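

# For reference: task state is read from the key config.celery_tasks +
# task_id. The prefix value lives in project config and is not shown in this
# listing; with Celery's Redis result backend it is typically
# "celery-task-meta-" (an assumption here). Hypothetical lookup:
#
# task = tasks.get("d9078da5-...")  # `tasks` is an assumed instance
# if task is not None:
#     print(task["status"])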


def get(self, job_id):
    '''Get one scheduled job info'''
    rds = utils.redis_connect()
    job = utils.convert2str(rds.hgetall(config.scheduler_jobs + ":" + job_id))
    if "info" not in job:
        return None
    # data saved as json in Redis
    job["info"] = json.loads(utils.decrypt(job["info"]))
    return job


def run(self, job_id, name, http_url, http_method, http_headers, http_data,
        command, email, hook_url, hook_headers):
    '''Run a new async task'''
    # first check if we need to run a shell command
    if len(command):
        cmd = subprocess.Popen(command,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        stdout, stderr = cmd.communicate()
        result = {
            "job_id": job_id,
            "stdout": utils.convert2str(stdout),
            "stderr": utils.convert2str(stderr)
        }
        # run email and web hook
        utils.send_email(name, result, email)
        utils.call_url(hook_url, "post", result, hook_headers)
        return result
    # if we need to call a url
    if len(http_url):
        if http_method.lower() not in [
                "get", "post", "put", "delete", "head", "options", "patch"
        ]:
            raise Exception("http method not supported")
        resp = utils.call_url(http_url, http_method, http_data, http_headers)
        result = {
            "job_id": job_id,
            "status_code": resp.status_code,
            "text": utils.convert2str(resp.text)
        }
        # run email and web hook
        utils.send_email(name, result, email)
        utils.call_url(hook_url, "post", result, hook_headers)
        return result
    return None
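

# Hypothetical call sketch for run(): execute a shell command and deliver the
# captured output to a webhook. The `tasks` instance and all argument values
# are illustrative only.
#
# result = tasks.run(
#     job_id="backup-01",
#     name="nightly backup",
#     http_url="",
#     http_method="",
#     http_headers={},
#     http_data={},
#     command="tar czf /tmp/backup.tgz /data",
#     email="",
#     hook_url="https://example.com/hooks/backup",
#     hook_headers={})
# # result == {"job_id": "backup-01", "stdout": "...", "stderr": "..."}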


def getResult(self, job_id):
    '''Get results for one scheduled job'''
    results = []
    rds = utils.redis_connect()
    for key in rds.scan_iter(config.celery_tasks + "*"):
        t = json.loads(utils.convert2str(rds.get(key.decode())))
        if "result" not in t:
            continue
        if "output" not in t["result"]:
            continue
        task_res = t["result"]["output"]
        if type(task_res) is not dict:
            continue
        # find results with the right job_id
        if "job_id" in task_res and task_res["job_id"] == job_id:
            results.append(t["result"])
    return results


def getResultAll(self):
    '''Get results for all scheduled jobs'''
    results = []
    rds = utils.redis_connect()
    for key in rds.scan_iter(config.celery_tasks + "*"):
        t = json.loads(utils.convert2str(rds.get(key.decode())))
        if "result" not in t:
            continue
        if "output" not in t["result"]:
            continue
        task_res = t["result"]["output"]
        if type(task_res) is not dict:
            continue
        # check if there is a job_id in the task output
        if "job_id" in task_res and task_res["job_id"] is not None:
            results.append(t["result"])
    return results
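

# For reference, the stored task document that getResult and getResultAll
# expect, inferred from the checks above (field values are illustrative and
# the exact wrapper shape is an assumption):
#
# {
#     "status": "SUCCESS",
#     "result": {
#         "output": {"job_id": "backup-01", "stdout": "...", "stderr": ""}
#     }
# }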


def create(self, schedule, **kwargs):
    '''Create a new scheduled job'''
    if not croniter.croniter.is_valid(schedule):
        raise Exception("bad cron expression")
    if kwargs["job_id"] is None or len(kwargs["job_id"]) == 0:
        raise Exception("job_id can not be empty")
    if len(kwargs["http_url"]) == 0 and len(kwargs["command"]) == 0:
        raise Exception("you need to provide http_url or command")
    rds = utils.redis_connect()
    # note: hmset is deprecated in redis-py 3.x; hset(name, mapping=...) is
    # the modern equivalent
    result = rds.hmset(
        config.scheduler_jobs + ":" + kwargs["job_id"], {
            "last_run": "",
            "run_count": 0,
            "schedule": schedule,
            "info": utils.encrypt(json.dumps(utils.convert2str(kwargs)))
        })
    if result:
        return kwargs["job_id"]
    return False
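

# Hypothetical usage sketch: register a job that POSTs to a URL every five
# minutes. The keyword names mirror run()'s parameters; the `scheduler`
# instance is assumed.
#
# scheduler.create(
#     "*/5 * * * *",
#     job_id="ping-example",
#     name="ping example",
#     http_url="https://example.com/ping",
#     http_method="post",
#     http_headers={},
#     http_data={},
#     command="",
#     email="",
#     hook_url="",
#     hook_headers={})
# # returns "ping-example" on success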