def get_redis_memory_used(connection=None):
    """
    Total memory used by all keys in the Redis ``rq:`` namespace.

    :param connection: optional Redis connection to use
    :return: human-readable size string, e.g. "1.2 MB"
    """
    import humanize
    from rq.connections import resolve_connection

    RQ_REDIS_NAMESPACE = 'rq:*'
    redis_connection = resolve_connection(connection)
    # Server-side Lua: SCAN the keyspace for rq:* keys and sum MEMORY USAGE,
    # so only a single integer crosses the wire.
    script = """
        local sum = 0
        local keys = {}
        local done = false
        local cursor = "0"
        repeat
            local result = redis.call("SCAN", cursor, "match", ARGV[1])
            cursor = result[1]
            keys = result[2]
            for i, key in ipairs(keys) do
                local mem = redis.call("MEMORY", "USAGE", key)
                -- MEMORY USAGE returns nil if the key vanished mid-scan
                if mem then
                    sum = sum + mem
                end
            end
            if cursor == "0" then
                done = true
            end
        until done
        return sum
    """
    script = redis_connection.register_script(script)
    return humanize.naturalsize(script(args=[RQ_REDIS_NAMESPACE]))
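# Usage sketch for get_redis_memory_used (assumes a reachable Redis server
# and the `humanize` package installed); the return value is a formatted
# string rather than a byte count.
print('rq:* keys currently use %s' % get_redis_memory_used())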
def __init__(self, queue_name='default', interval=60, connection=None):
    from rq.connections import resolve_connection
    self.connection = resolve_connection(connection)
    self.queue_name = queue_name
    self._interval = interval
    self.log = logger
    self._lock_acquired = False
def __init__(self, queue_name="default", interval=60, connection=None): from rq.connections import resolve_connection self.connection = resolve_connection(connection) self.queue_name = queue_name self._interval = interval self.log = logger
def __init__(self, queue_name='default', queue=None, interval=60, connection=None,
             job_class=None, queue_class=None, name=None):
    from rq.connections import resolve_connection
    self.connection = resolve_connection(connection)
    # An explicit Queue instance takes precedence over queue_name.
    self._queue = queue
    if self._queue is None:
        self.queue_name = queue_name
    else:
        self.queue_name = self._queue.name
    self._interval = interval
    self.log = logger
    self._lock_acquired = False
    self.job_class = backend_class(self, 'job_class', override=job_class)
    # Remember the dotted path when queue_class is given as a string.
    self.queue_class_name = None
    if isinstance(queue_class, string_types):
        self.queue_class_name = queue_class
    self.queue_class = backend_class(self, 'queue_class', override=queue_class)
    self.name = name or uuid4().hex
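# Usage sketch (the owning class is not named above, so `SchedulerLike` is
# hypothetical): a Queue instance passed via `queue` overrides `queue_name`.
q = Queue('reports')
s = SchedulerLike(queue_name='ignored', queue=q)
assert s.queue_name == 'reports'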
def __init__(self, job_class=None, queue_class=None, interval=1, connection=None):
    self.interval = interval
    self.log = logger
    self.connection = resolve_connection(connection)
    self.job_class = job_class if job_class is not None else Job
    self.queue_class = queue_class if queue_class is not None else Queue
    self._queue_cache = {}
def all(cls, connection=None, job_class=None, serializer=None):
    """Returns an iterable of all Queues."""
    connection = resolve_connection(connection)

    def to_queue(queue_key):
        return cls.from_queue_key(as_text(queue_key),
                                  connection=connection,
                                  job_class=job_class,
                                  serializer=serializer)

    # Queue keys are registered in the rq:queues set; each member is already
    # a full key such as "rq:queue:default".
    return [to_queue(queue_key)
            for queue_key in connection.smembers(cls.redis_queues_keys)]
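# Usage sketch for Queue.all (assumes RQ's usual setup with a default
# connection): enumerate every registered queue by name and pending count.
for queue in Queue.all():
    print(queue.name, len(queue))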
def __init__(self, queue_name='default', interval=60, connection=None,
             scheduler_key=None, scheduled_jobs_key=None):
    from rq.connections import resolve_connection
    self.connection = resolve_connection(connection)
    self.queue_name = queue_name
    self._interval = interval
    self.log = logger
    self._lock_acquired = False
    if scheduler_key:
        self.scheduler_key = scheduler_key
    if scheduled_jobs_key:
        self.scheduled_jobs_key = scheduled_jobs_key
def lpop(cls, queue_keys, blocking, connection=None):
    """Helper method to abstract away some Redis API details: LPOP accepts
    only a single key, whereas BLPOP accepts multiple, so for the
    non-blocking case we iterate over all queues and pop from each
    individually. Here the queues are sorted sets scored by timestamp, so
    "popping" means taking every job whose scheduled time has passed.
    `blocking` is accepted for interface compatibility but unused here.
    """
    connection = resolve_connection(connection)
    timestamp = int(time.time())
    for queue_key in queue_keys:
        # All jobs scheduled at or before now, newest first.
        values = connection.zrevrangebyscore(queue_key, timestamp, 0)
        if values:
            connection.zremrangebyscore(queue_key, 0, timestamp)
            yield queue_key, values
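# Usage sketch for lpop (the owning class name is not shown above, so
# `SomeScheduler` and the two sorted-set keys are hypothetical): drain all
# due job ids from several scheduled-job queues in one pass.
for queue_key, job_ids in SomeScheduler.lpop(
        ['rq:scheduler:high', 'rq:scheduler:low'], blocking=False):
    print('%s has %d due jobs' % (queue_key, len(job_ids)))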
def empty_registry(registry_name, queue_name, connection=None):
    """Empties a specific registry for a specific queue. Not in RQ;
    implemented here for performance reasons.
    """
    redis_connection = resolve_connection(connection)
    queue_instance = Queue.from_queue_key(
        Queue.redis_queue_namespace_prefix + queue_name,
        connection=redis_connection)

    registry_instance = None
    if registry_name == 'failed':
        registry_instance = queue_instance.failed_job_registry
    elif registry_name == 'started':
        registry_instance = queue_instance.started_job_registry
    elif registry_name == 'scheduled':
        registry_instance = queue_instance.scheduled_job_registry
    elif registry_name == 'deferred':
        registry_instance = queue_instance.deferred_job_registry
    elif registry_name == 'finished':
        registry_instance = queue_instance.finished_job_registry

    # Server-side Lua: pop every job id out of the registry's sorted set and
    # delete the job hash plus its dependents set in a single round trip.
    script = """
        local prefix = "{0}"
        local q = KEYS[1]
        local count = 0
        while true do
            local job_id, score = unpack(redis.call("zpopmin", q))
            if job_id == nil or score == nil then
                break
            end
            -- Delete the relevant keys
            redis.call("del", prefix..job_id)
            redis.call("del", prefix..job_id..":dependents")
            count = count + 1
        end
        return count
    """.format(registry_instance.job_class.redis_job_namespace_prefix).encode("utf-8")
    script = redis_connection.register_script(script)
    return script(keys=[registry_instance.key])
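# Usage sketch for empty_registry (assumes a default connection): wipe the
# failed-job registry of the "default" queue and report how many jobs the
# Lua script deleted.
removed = empty_registry('failed', 'default')
print('removed %d failed jobs' % removed)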
def get_job_ids(self, offset=0, length=-1, sort_key='created_at', sort_order='asc'):
    """
    Builds a list of all failed jobs. This will be slow and memory-hungry
    when there are many failed jobs, but it is left this way for simplicity.
    """
    if sort_order not in ('asc', 'desc'):
        raise ValueError('Invalid sort_order: %s' % sort_order)
    if self._job_ids is None:
        result = []
        key_template = 'rq:job:{job_id}'
        for queue, registry in self._registries:
            job_ids = registry.get_job_ids()
            connection = resolve_connection(queue.connection)
            # Fetch the sort key for every job in one round trip.
            pipeline = connection.pipeline()
            for job_id in job_ids:
                pipeline.hget(key_template.format(job_id=job_id), sort_key)
            creation_dates = pipeline.execute()
            result.extend(zip(job_ids, creation_dates))
        # Sorting failed jobs globally
        result.sort(key=lambda pair: pair[1] or b'', reverse=(sort_order == 'desc'))
        self._job_ids = [pair[0] for pair in result]

    # Dirty hack to turn a Redis-style (offset, length) range into a Python slice
    start = offset
    if length >= 0:
        end = offset + length
    else:
        end = length
    if length == -1:
        return self._job_ids[start:]
    return self._job_ids[start:end]
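# The offset/length handling above mirrors Redis range semantics, where a
# length of -1 means "to the end". A standalone sketch of the same
# conversion, with a hypothetical helper name:
def redis_range_to_slice(items, offset=0, length=-1):
    if length == -1:
        return items[offset:]
    end = offset + length if length >= 0 else length
    return items[offset:end]

assert redis_range_to_slice(list(range(10)), offset=2, length=3) == [2, 3, 4]
assert redis_range_to_slice(list(range(10)), offset=8) == [8, 9]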
def __init__(self, queue_name='default', interval=60, connection=None):
    self.connection = resolve_connection(connection)
    self.queue_name = queue_name
    self._interval = interval
    self.log = logger
def test_01_redis_running(self):
    conn = resolve_connection()
    try:
        conn.echo('ping')
    except ConnectionError:
        print('No connection; redis is not running. Start the redis server.')
def from_name(cls, queue_name, connection=None):
    connection = resolve_connection(connection)
    queue = RestQueue(queue_name, connection=connection)
    return queue if queue.is_rest() else None
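# Usage sketch for from_name ('emails' is an illustrative queue name): the
# constructor returns a RestQueue only when the queue is flagged as
# REST-enabled, otherwise None, so callers can branch on the result directly.
queue = RestQueue.from_name('emails')
if queue is not None:
    print('queue %s accepts REST submissions' % queue.name)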