示例#1
0
File: async.py  Project: rmin/unicron
def get(self, task_id):
    '''Look up the status/result of one async task stored in Redis.

    Returns the decoded task dict, or None when no entry exists for
    the given task_id.
    '''
    conn = utils.redis_connect()
    raw = conn.get(config.celery_tasks + task_id)
    # entries are stored as JSON blobs keyed by the celery task prefix
    return None if raw is None else json.loads(utils.convert2str(raw))
示例#2
0
def run_scheduler(lock_key):
    '''Scheduler function for sending async tasks.

    Runs one scheduler "tick": acquires or refreshes a Redis-based lock
    so only one scheduler instance is active, then walks every stored
    job hash and, for each job whose cron schedule fires at the current
    minute, POSTs an async task to the unicron API and records the run.

    lock_key -- identifier for this scheduler process; if a different
    value is already stored under config.scheduler_lock, another
    scheduler owns the lock and this call exits immediately.
    '''
    if config.stdlogs:
        logger.warning("running scheduler with key " + lock_key)
    rds = utils.redis_connect()
    # process scheduler lock
    slock = utils.convert2str(rds.get(config.scheduler_lock))
    # it is locked by another scheduler process
    if slock is not None and str(slock) != lock_key:
        if config.stdlogs:
            logger.warning("exiting scheduler. locked by " + slock)
        return
    # add or update lock and set new expiration
    # NOTE(review): SET followed by EXPIRE is not atomic; a crash in
    # between would leave a lock with no TTL -- confirm acceptable, or
    # use rds.set(..., ex=config.scheduler_locktimer)
    if config.stdlogs:
        logger.warning("updating scheduler lock " + lock_key)
    rds.set(config.scheduler_lock, lock_key)
    rds.expire(config.scheduler_lock, config.scheduler_locktimer)
    # "now" truncated to the minute in the configured timezone; jobs
    # are matched at minute granularity
    # NOTE(review): datetime.now() is server-local; localize() assumes
    # it already matches celeryconfig.timezone -- confirm
    tz = pytz.timezone(celeryconfig.timezone)
    now = tz.localize(datetime.datetime.now())
    now = now.replace(second=0, microsecond=0)
    now_str = now.strftime("%Y-%m-%d %H:%M:%S")
    # process tasks
    for key in rds.scan_iter(config.scheduler_jobs + ":*"):
        key = key.decode()
        job = utils.convert2str(rds.hgetall(key))
        # skip if we have already created the job for this minute
        if job["last_run"] == now_str:
            if config.stdlogs:
                logger.warning("skip sent job")
            continue
        # first tick for a brand-new job: just record last_run and wait
        # for the next matching minute
        if len(job["last_run"]) == 0:
            rds.hset(key, "last_run", now_str)
            continue
        # find the next run time after the recorded last run
        job["last_run"] = tz.localize(
            datetime.datetime.strptime(job["last_run"], "%Y-%m-%d %H:%M:%S"))
        next_run = croniter.croniter(
            job["schedule"], job["last_run"]).get_next(datetime.datetime)
        next_run = next_run.replace(second=0, microsecond=0)
        # advance past any missed fire times so a stale last_run does
        # not trigger a burst of catch-up runs
        while next_run < now:
            next_run = croniter.croniter(job["schedule"],
                                         next_run).get_next(datetime.datetime)
        # only fire when the next scheduled minute is exactly now
        if next_run.strftime("%Y-%m-%d %H:%M:00") != now_str:
            continue
        try:
            # create async task to run once immediately; job payload is
            # stored encrypted as JSON in the "info" field
            r = requests.post(config.unicron_api + "/async/",
                              json=utils.convert2str(
                                  json.loads(utils.decrypt(job["info"]))))
            if config.stdlogs:
                logger.warning("job scheduled: " + utils.convert2str(r.text))
            # update last_run of the job and bump its run counter
            rds.hset(key, "last_run", now_str)
            rds.hincrby(key, "run_count")
        except Exception as e:
            # best-effort: log the failure and move on to the next job
            if config.stdlogs:
                logger.warning("request error " + str(e))
示例#3
0
def get(self, job_id):
    '''Fetch a single scheduled job's stored hash from Redis.

    Returns the job dict with its "info" field decrypted and parsed
    from JSON, or None when no such job exists.
    '''
    conn = utils.redis_connect()
    raw = conn.hgetall(config.scheduler_jobs + ":" + job_id)
    job = utils.convert2str(raw)
    if "info" in job:
        # "info" is persisted encrypted, as a JSON document
        job["info"] = json.loads(utils.decrypt(job["info"]))
        return job
    return None
示例#4
0
def getAll(self):
    '''Return every scheduled job, keyed by its job id.'''
    conn = utils.redis_connect()
    prefix = config.scheduler_jobs + ":"
    jobs = {}
    for raw_key in conn.scan_iter(prefix + "*"):
        name = raw_key.decode()
        job = utils.convert2str(conn.hgetall(name))
        # entries without an "info" field are not valid jobs
        if "info" not in job:
            continue
        # "info" is persisted encrypted, as a JSON document
        job["info"] = utils.convert2str(json.loads(utils.decrypt(job["info"])))
        jobs[utils.remove_prefix(name, prefix)] = job
    return jobs
示例#5
0
def getResult(self, job_id):
    '''Get results for one scheduled job.

    Scans every stored celery task entry and collects the "result"
    payloads whose output dict carries the requested job_id.

    Returns a (possibly empty) list of result dicts.
    '''
    results = []
    rds = utils.redis_connect()
    for key in rds.scan_iter(config.celery_tasks + "*"):
        raw = rds.get(key.decode())
        # the key may have expired between SCAN and GET; skip instead
        # of crashing on json.loads(None)
        if raw is None:
            continue
        t = json.loads(utils.convert2str(raw))
        if "result" not in t:
            continue
        if "output" not in t["result"]:
            continue
        task_res = t["result"]["output"]
        # isinstance is the idiomatic type check (was: type(...) is not dict)
        if not isinstance(task_res, dict):
            continue
        # find results with the right job_id
        if "job_id" in task_res and task_res["job_id"] == job_id:
            results.append(t["result"])
    return results
示例#6
0
def getResultAll(self):
    '''Get results for all scheduled jobs.

    Scans every stored celery task entry and collects the "result"
    payloads whose output dict carries any non-None job_id.

    Returns a (possibly empty) list of result dicts.
    '''
    results = []
    rds = utils.redis_connect()
    for key in rds.scan_iter(config.celery_tasks + "*"):
        raw = rds.get(key.decode())
        # the key may have expired between SCAN and GET; skip instead
        # of crashing on json.loads(None)
        if raw is None:
            continue
        t = json.loads(utils.convert2str(raw))
        if "result" not in t:
            continue
        if "output" not in t["result"]:
            continue
        task_res = t["result"]["output"]
        # isinstance is the idiomatic type check (was: type(...) is not dict)
        if not isinstance(task_res, dict):
            continue
        # check if there is a job_id in the task output
        if task_res.get("job_id") is not None:
            results.append(t["result"])
    return results
示例#7
0
def create(self, schedule, **kwargs):
    '''Create a new scheduled job.

    Validates the cron expression and required fields, then stores the
    job hash in Redis with its info payload encrypted as JSON.

    schedule -- cron expression driving the job.
    kwargs   -- job attributes; must include a non-empty "job_id" and
                at least one of "http_url" or "command".

    Returns the job_id on success, False if the Redis write failed.
    Raises Exception on invalid input.
    '''
    if not croniter.croniter.is_valid(schedule):
        raise Exception("bad cron expression")
    # use .get() so a missing key triggers the explicit validation
    # errors below instead of an opaque KeyError
    job_id = kwargs.get("job_id")
    if job_id is None or len(job_id) == 0:
        raise Exception("job_id can not be empty")
    if len(kwargs.get("http_url", "")) == 0 and len(kwargs.get("command", "")) == 0:
        raise Exception("you need to provide http_url or command")
    rds = utils.redis_connect()
    # NOTE(review): hmset is deprecated in redis-py >= 3.0; switch to
    # hset(name, mapping=...) once the client version is confirmed
    result = rds.hmset(
        config.scheduler_jobs + ":" + job_id, {
            "last_run": "",
            "run_count": 0,
            "schedule": schedule,
            "info": utils.encrypt(json.dumps(utils.convert2str(kwargs)))
        })
    if result:
        return job_id
    return False
示例#8
0
def delete(self, job_id):
    '''Remove a scheduled job's hash from Redis.

    Returns the Redis DEL result (number of keys removed).
    '''
    conn = utils.redis_connect()
    key = config.scheduler_jobs + ":" + job_id
    return conn.delete(key)