Example #1
File: tq.py Project: abom/tq
class TaskQueue:
    def __init__(self):
        self.r = Redis()

    def schedule_fun(self, fun, *args, **kwargs):
        return self.schedule(new_job(fun, *args, **kwargs))

    def schedule(self, job):
        job.state = SCHEDULED
        print(self.r.lpush(WORKQ, job_dumps(job)))

    def _move_job_from_workq_to_activeq(self):
        # BRPOPLPUSH blocks until a job is available, then atomically moves it
        # from the work queue to the active queue.
        self.r.brpoplpush(WORKQ, ACTIVEQ)

    def get_worker_job(self):
        if self.r.llen(ACTIVEQ) == 0:
            self._move_job_from_workq_to_activeq()
        activejob = self.r.lpop(ACTIVEQ)
        job = job_loads(activejob)
        return job
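
The pattern here is a BRPOPLPUSH-based reliable queue: jobs wait in WORKQ and are moved atomically to ACTIVEQ before being consumed. A minimal, self-contained sketch of the same pattern with plain redis-py (queue names are illustrative, not the project's constants):

from redis import Redis

r = Redis()

# Producer side: push serialized work onto the waiting queue.
r.lpush("work", b"job-1")

# Consumer side: atomically move one item to the in-progress queue,
# process it, then acknowledge by removing it from that queue.
item = r.brpoplpush("work", "active", timeout=5)
if item is not None:
    # ... process item ...
    r.lrem("active", 0, item)
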
Example #2
class Redline(object):

    # FIFO Queue
    def __init__(
        self,
        queue_name,
        host="localhost",
        port=6379,
        db=0,
        password=None,
        socket_timeout=None,
        connection_pool=None,
        charset="utf-8",
        errors="strict",
        unix_socket_path=None,
    ):
        # Initialize Redis
        self.redis = Redis(
            host=host,
            port=port,
            db=db,
            password=password,
            socket_timeout=socket_timeout,
            connection_pool=connection_pool,
            charset=charset,
            errors=errors,
            unix_socket_path=unix_socket_path,
        )
        self.queue_name = queue_name
        self.queue_key = "redline:queue:%s" % self.queue_name
        self.processing_key = "redline:processing:%s" % self.queue_name
        self.pending_key = "redline:pending:%s" % self.queue_name
        self.completed_key = "redline:completed:%s" % self.queue_name

    def push(self, element):
        print(self.queue_key, element)
        self.redis.lpush(self.queue_key, element)
        self.redis.sadd(self.pending_key, element)

    def push_unique(self, element):
        seen = self.check_seen(element)
        if not seen:
            self.push(element)

    def pop(self):
        popped_element = self.redis.rpoplpush(self.queue_key, self.processing_key)
        return popped_element

    def bpop(self):
        popped_element = self.redis.brpoplpush(self.queue_key, self.processing_key)
        return popped_element

    def mark_completed(self, element):
        # redis-py >= 3.0 uses lrem(name, count, value); count=0 removes all occurrences
        self.redis.lrem(self.processing_key, 0, element)
        self.redis.sadd(self.completed_key, element)
        self.redis.srem(self.pending_key, element)

    def queue_size(self):
        return self.redis.llen(self.queue_key)

    def processing_size(self):
        return self.redis.llen(self.processing_key)

    def completed_size(self):
        set_members = self.redis.smembers(self.completed_key)
        return len(set_members)

    def empty(self):
        return self.queue_size() == 0 and self.processing_size() == 0

    def check_pending(self, element):
        return self.redis.sismember(self.pending_key, element)

    def check_completed(self, element):
        return self.redis.sismember(self.completed_key, element)

    def check_seen(self, element):
        pending = self.check_pending(element)
        completed = self.check_completed(element)
        return pending or completed
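
A short usage sketch, assuming the Redline class above is importable, a local Redis server is running, and a redis-py release that still accepts the charset/errors constructor arguments used above (redis-py returns bytes by default, so url comes back as a bytes object):

q = Redline("crawl")
q.push_unique("http://example.com/a")
q.push_unique("http://example.com/a")   # duplicate, skipped by check_seen()
url = q.bpop()                          # blocks until an element is available
# ... process url ...
q.mark_completed(url)
print(q.queue_size(), q.processing_size(), q.completed_size())
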
Example #3
def push_redis():
    r = Redis(host='', port=6379, password='******')

    url = r.brpoplpush('master:start_urls','slave:urls',3)
    
    return url
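
brpoplpush returns None when the 3-second timeout expires, so callers typically loop until a URL arrives. A minimal sketch of that loop (a local default server is assumed here; redis-py returns bytes, hence the decode):

from redis import Redis

r = Redis()

def next_url():
    while True:
        url = r.brpoplpush('master:start_urls', 'slave:urls', 3)
        if url is not None:
            return url.decode()
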
Example #4
# hash
conn.hgetall('hkey')
conn.hincrby('hkey', 'key', 1)
conn.hincrbyfloat('hkey', 'key', 2.3)

# list
conn.rpush('lkey', 1, 2, 3)
conn.lpush('lkey', 1, 2, 3)
conn.lpop('lkey')
conn.rpop('lkey')
conn.lrange('lkey', 0, -1) # return a list
conn.lindex('lkey', 2)
conn.ltrim('lkey', 1, -1)
conn.blpop(['list1', 'list2'], 1)
conn.brpop(['list1', 'list2'], 2)
conn.rpoplpush('list1', 'list2')
conn.brpoplpush('list1', 'list2', 3)

# set
conn.sadd('key', 'item1', 'item2')
conn.srem('key', 'item2')
conn.sismember('key', 'item')
conn.scard('key')
conn.smembers('key')
conn.smove('key1', 'key2', 'item')
conn.sdiff('key1', 'key2', 'key3') # returns elements that are in the first set but not in the others
conn.sinter('key1', 'key2')
conn.sunion('key1', 'key2')

# string
conn.set('key', '15')
conn.get('key')
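
One detail worth keeping in mind with the list commands above: blpop/brpop return a (key, value) pair, or None on timeout, while rpoplpush/brpoplpush return only the moved value (or None if the source list is empty). A small sketch, assuming a local server:

from redis import Redis

conn = Redis()
conn.rpush('list1', 'a', 'b')
item = conn.blpop(['list1', 'list2'], timeout=1)   # -> (b'list1', b'a')
if item is not None:
    key, value = item
moved = conn.rpoplpush('list1', 'list2')           # -> b'b'
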
Example #5
File: tq.py Project: xmonader/tq
class TaskQueueManager:
    def __init__(self):
        self.r = Redis()

    def schedule_fun(self, fun, *args, **kwargs):
        return self.schedule(new_job(fun, *args, **kwargs))

    def schedule(self, job):
        job.state = JobState.SCHEDULED
        self.save_job(job)
        print(self.r.lpush(WAITING_Q, job.job_key))
        return job

    def _move_job_from_waiting_q_to_worker_q(self, worker_q):
        # BRPOPLPUSH blocks until a job key is available, then atomically moves
        # it from the waiting queue to the given worker queue and returns it.
        return self.r.brpoplpush(WAITING_Q, worker_q)

    def get_worker_job_from_worker_queue(self, worker_q):
        activejob = self._move_job_from_waiting_q_to_worker_q(worker_q)
        return activejob

    def job_dumps(self, job):
        return dill_dumps(job)

    def job_loads(self, data):
        return dill_loads(data)

    def job_to_success(self, job):
        job.state = JobState.SUCCESS
        now = time.time()
        job.last_modified_time = now
        job.done_time = now
        return job

    def prepare_to_reschedule(self, job):
        job.worker_id = None
        return job

    def set_job_result(self, job, value):
        job.result = value
        print("setting result to : ", value)
        dumped_value = dill_dumps(value)
        self.r.set(job.job_result_key, dumped_value)
        self.r.set(job.job_result_key_cached, dumped_value)
        self.save_job(job)
        return job

    def get_job_result_from_redis(self, job):
        # Prefer the cached result key when it exists, fall back to the regular
        # result key, and return the deserialized value.
        if self.r.exists(job.job_result_key_cached):
            val = dill_loads(self.r.get(job.job_result_key_cached))
        else:
            val = dill_loads(self.r.get(job.job_result_key))
        return val

    def get_job_result(self, job, raise_exception=False):
        if raise_exception and job.state == JobState.FAILURE:
            job.safe_to_collect = True
            self.save_job(job)
            raise RuntimeError(job.error)

        job = self.get_job(job.job_key)
        val = None
        try:
            if job.memoized and self.r.exists(job.job_result_key_cached):
                val = dill_loads(self.r.get(job.job_result_key_cached))
            else:
                val = dill_loads(self.r.get(job.job_result_key))
        except Exception as e:
            print("[-] error getting job result: ", e)
        job.safe_to_collect = True
        self.save_job(job)
        return val

    def wait_job(self, job):
        job = self.get_job(job.job_key)

        while True:
            if job.state in [JobState.SUCCESS, JobState.FAILURE]:
                break
            job = self.get_job(job.job_key)
            print(job.state, job.job_key)

            sleep(1)

    def save_job(self, job):
        self.r.set(job.job_key, self.job_dumps(job))

    def get_job(self, job_key):
        return self.job_loads(self.r.get(job_key))

    def job_to_failure(self, job):
        job.retries -= 1
        job.state = JobState.FAILURE
        job.error = str(traceback.format_exc())
        job.last_modified_time = time.time()
        return job

    def clean_job_from_worker_queue(self, jobkey, worker_queue):
        # count=0 removes every occurrence of the job key from the worker queue
        self.r.lrem(worker_queue, 0, jobkey)

    def get_jobs_of_worker(self, worker_id):
        return self.r.lrange("tq:queue:worker:{}".format(worker_id), 0, -1)

    def get_jobs_of_worker_queue(self, worker_queue):
        return self.r.lrange(worker_queue, 0, -1)

    def get_all_jobs_keys(self):
        return self.r.keys("tq:job:*")

    def get_worker_queues(self):
        return self.r.keys("tq:queue:worker*")

    def get_worker_last_seen(self, worker_id):
        last_seen_key = "tq:worker-{}-last_seen".format(worker_id)
        last_seen = self.r.get(last_seen_key).decode()
        return last_seen
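
The manager above stores jobs and results as dill-serialized blobs under plain Redis string keys. A minimal sketch of that round trip, assuming the dill and redis packages are installed (the key name is illustrative, not one of the project's tq:* keys):

import dill
from redis import Redis

r = Redis()

def add(a, b):
    return a + b

# Serialize a callable together with its arguments, store it, then load and run it.
payload = dill.dumps((add, (2, 3)))
r.set('demo:job:1', payload)

fun, args = dill.loads(r.get('demo:job:1'))
print(fun(*args))   # -> 5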