def interval(self, interval, queue_id, queue_type='default'):
    """Set a new dequeue interval for the given queue_id of a
    particular queue_type.

    Returns a dict with 'status' set to 'success' when the queue
    exists, 'failure' when no such queue is known.
    """
    # reject bad input up front
    if not is_valid_interval(interval):
        raise BadArgumentException('`interval` has an invalid value.')
    if not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    # the interval lives in a single hash keyed by
    # "<prefix>:interval", under the field "<queue_type>:<queue_id>"
    interval_response = self._lua_interval(
        keys=['%s:interval' % self._key_prefix,
              '%s:%s' % (queue_type, queue_id)],
        args=[interval])

    # a zero reply from the Lua script means the queue does not exist
    if interval_response == 0:
        return {'status': 'failure'}
    return {'status': 'success'}
def finish(self, job_id, queue_id, queue_type='default'):
    """Marks any dequeued job as *completed successfully*.

    Any job which gets a finish will be treated as complete
    and will be removed from the SharQ.
    """
    # validate every identifier with the shared error-message shape
    for arg_name, arg_value in (('job_id', job_id),
                                ('queue_id', queue_id),
                                ('queue_type', queue_type)):
        if not is_valid_identifier(arg_value):
            raise BadArgumentException(
                '`%s` has an invalid value.' % arg_name)

    finish_response = self._lua_finish(
        keys=[self._key_prefix, queue_type],
        args=[queue_id, job_id])

    # a zero reply means the job could not be finished
    if finish_response == 0:
        return {'status': 'failure'}
    return {'status': 'success'}
def clear_queue(self, queue_type=None, queue_id=None, purge_all=False):
    """Clear all entries in the queue with a particular queue_id and
    queue_type.

    When ``purge_all`` is True, also purge the related resources
    (payloads, the interval setting, and the job queue list) from
    redis.
    """
    if queue_id is None or not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if queue_type is None or not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    response = {'status': 'Failure', 'message': 'No queued calls found'}

    # drop the queue from the primary sorted set first
    primary_set = '{}:{}'.format(self._key_prefix, queue_type)
    removed = self._r.zrem(primary_set, queue_id)
    if removed:
        response.update({
            'status': 'Success',
            'message': 'Successfully removed all queued calls'
        })

    job_queue_list = '{}:{}:{}'.format(
        self._key_prefix, queue_type, queue_id)
    if removed and purge_all:
        # full cleanup of resources, batched through one pipeline;
        # not strictly necessary since dequeue does not remove them
        pipe = self._r.pipeline()
        payload_set = '{}:payload'.format(self._key_prefix)
        for job_uuid in self._r.lrange(job_queue_list, 0, -1):
            if job_uuid is None:
                continue
            # payload entries are keyed "<type>:<id>:<job_uuid>"
            pipe.hdel(payload_set,
                      '{}:{}:{}'.format(queue_type, queue_id, job_uuid))
        # clear the per-queue interval setting
        interval_set = '{}:interval'.format(self._key_prefix)
        pipe.hdel(interval_set, '{}:{}'.format(queue_type, queue_id))
        # finally, drop the job queue list itself
        pipe.delete(job_queue_list)
        pipe.execute()
        response.update({
            'status': 'Success',
            'message': 'Successfully removed all queued calls and purged related resources'
        })
    else:
        # always delete the job queue list
        self._r.delete(job_queue_list)
    return response
def enqueue(self, payload, interval, job_id, queue_id,
            queue_type='default', requeue_limit=None):
    """Enqueues the job into the specified queue_id of a particular
    queue_type.

    :param payload: job payload; must be serializable by
        ``serialize_payload`` or a BadArgumentException is raised.
    :param interval: minimum gap between two dequeues of this queue.
    :param job_id: identifier of the job being enqueued.
    :param queue_id: identifier of the queue to push into.
    :param queue_type: namespace grouping related queues.
    :param requeue_limit: maximum number of requeues allowed; falls
        back to the instance-wide default when None.
    :returns: ``{'status': 'queued'}`` on success.
    :raises BadArgumentException: if any argument fails validation or
        the payload cannot be serialized.
    """
    # validate all the input
    if not is_valid_interval(interval):
        raise BadArgumentException('`interval` has an invalid value.')
    if not is_valid_identifier(job_id):
        raise BadArgumentException('`job_id` has an invalid value.')
    if not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')
    if requeue_limit is None:
        requeue_limit = self._default_job_requeue_limit
    if not is_valid_requeue_limit(requeue_limit):
        raise BadArgumentException('`requeue_limit` has an invalid value.')

    try:
        serialized_payload = serialize_payload(payload)
    except TypeError as e:
        # BUGFIX: was `e.message`, which is deprecated since
        # Python 2.6 and removed in Python 3; str(e) works in both.
        raise BadArgumentException(str(e))

    timestamp = str(generate_epoch())

    keys = [self._key_prefix, queue_type]
    # the payload is wrapped in double quotes; the matching dequeue
    # strips the first and last character before deserializing
    args = [
        timestamp, queue_id, job_id,
        '"%s"' % serialized_payload,
        interval, requeue_limit
    ]
    self._lua_enqueue(keys=keys, args=args)

    return {'status': 'queued'}
def clear_queue(self, queue_type=None, queue_id=None, purge_all=False):
    """Clear all entries in the queue identified by queue_id and
    queue_type.

    The optional ``purge_all`` flag additionally removes the related
    resources (payloads, interval, job list) from redis.
    """
    if queue_id is None or not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if queue_type is None or not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    # assume failure until the zrem tells us otherwise
    result = {
        'status': 'Failure',
        'message': 'No queued calls found'
    }

    # remove the queue from the primary sorted set
    queued_status = self._r.zrem(
        '{}:{}'.format(self._key_prefix, queue_type), queue_id)
    if queued_status:
        result['status'] = 'Success'
        result['message'] = 'Successfully removed all queued calls'

    job_queue_list = '{}:{}:{}'.format(
        self._key_prefix, queue_type, queue_id)

    if not (queued_status and purge_all):
        # always delete the job queue list
        self._r.delete(job_queue_list)
        return result

    # purge path: walk every job uuid still sitting in the job list
    # and delete its payload, then the interval and the list itself
    # (dequeue never removes these, so this is the thorough cleanup)
    pipe = self._r.pipeline()
    for job_uuid in self._r.lrange(job_queue_list, 0, -1):
        if job_uuid is None:
            continue
        pipe.hdel('{}:payload'.format(self._key_prefix),
                  '{}:{}:{}'.format(queue_type, queue_id, job_uuid))
    pipe.hdel('{}:interval'.format(self._key_prefix),
              '{}:{}'.format(queue_type, queue_id))
    pipe.delete(job_queue_list)
    pipe.execute()

    result['status'] = 'Success'
    result['message'] = ('Successfully removed all queued calls '
                         'and purged related resources')
    return result
def dequeue(self, queue_type='default'):
    """Dequeues a job from any of the ready queues based on the
    queue_type. If no job is ready, returns a failure status.
    """
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    now = str(generate_epoch())
    result = self._lua_dequeue(
        keys=[self._key_prefix, queue_type],
        args=[now, self._job_expire_interval])

    # fewer than four fields means nothing was ready to dequeue
    if len(result) < 4:
        return {'status': 'failure'}

    queue_id, job_id, raw_payload, requeues_remaining = result
    # strip the surrounding double quotes added at enqueue time
    return {
        'status': 'success',
        'queue_id': queue_id,
        'job_id': job_id,
        'payload': deserialize_payload(raw_payload[1:-1]),
        'requeues_remaining': int(requeues_remaining)
    }
def interval(self, interval, queue_id, queue_type='default'):
    """Updates the interval for a specific queue_id of a particular
    queue type.
    """
    # validate all the input
    if not is_valid_interval(interval):
        raise BadArgumentException('`interval` has an invalid value.')
    if not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    # hash holding every queue's interval, and the field for this one
    hmap_key = '%s:interval' % self._key_prefix
    queue_key = '%s:%s' % (queue_type, queue_id)
    result = self._lua_interval(keys=[hmap_key, queue_key],
                                args=[interval])

    # zero indicates the queue with this id and type does not exist
    status = 'failure' if result == 0 else 'success'
    return {'status': status}
def finish(self, job_id, queue_id, queue_type='default'):
    """Marks any dequeued job as *completed successfully*.

    Any job which gets a finish will be treated as complete
    and will be removed from the SharQ.
    """
    if not is_valid_identifier(job_id):
        raise BadArgumentException('`job_id` has an invalid value.')
    if not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    outcome = self._lua_finish(
        keys=[self._key_prefix, queue_type],
        args=[queue_id, job_id])

    # a zero reply means the finish failed
    status = 'failure' if outcome == 0 else 'success'
    return {'status': status}
def dequeue(self, queue_type='default'):
    """Dequeues a job from any of the ready queues based on the
    queue_type. If no job is ready, returns a failure status.
    """
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    timestamp = str(generate_epoch())
    dequeue_response = self._lua_dequeue(
        keys=[self._key_prefix, queue_type],
        args=[timestamp, self._job_expire_interval])

    # anything shorter than four fields means no job was ready
    if len(dequeue_response) < 4:
        return {'status': 'failure'}

    queue_id, job_id, payload, requeues_remaining = dequeue_response
    # the payload was stored wrapped in quotes; drop them before
    # deserializing
    payload = deserialize_payload(payload[1:-1])

    response = {'status': 'success'}
    response['queue_id'] = queue_id
    response['job_id'] = job_id
    response['payload'] = payload
    response['requeues_remaining'] = int(requeues_remaining)
    return response
def metrics(self, queue_type=None, queue_id=None):
    """Provides a way to get statistics about various parameters like,
    * global enqueue / dequeue rates per min.
    * per queue enqueue / dequeue rates per min.
    * queue length of each queue.
    * list of queue ids for each queue type.

    Dispatch depends on which arguments are supplied:
    * neither       -> global rates + list of queue types.
    * queue_type    -> list of queue ids for that type.
    * both          -> per-queue rates + queue length.
    * queue_id only -> BadArgumentException (needs queue_type too).
    """
    # validate only the arguments that were actually supplied
    if queue_id is not None and not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if queue_type is not None and not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')

    response = {
        'status': 'failure'
    }
    if not queue_type and not queue_id:
        # return global stats.
        # list of active queue types (ready + active)
        active_queue_types = self._r.smembers(
            '%s:active:queue_type' % self._key_prefix)
        ready_queue_types = self._r.smembers(
            '%s:ready:queue_type' % self._key_prefix)
        # set union of the two membership sets
        all_queue_types = active_queue_types | ready_queue_types
        # global rates for past 10 minutes
        timestamp = str(generate_epoch())
        keys = [
            self._key_prefix
        ]
        args = [
            timestamp
        ]
        enqueue_details, dequeue_details = self._lua_metrics(
            keys=keys, args=args)

        enqueue_counts = {}
        dequeue_counts = {}
        # the length of enqueue & dequeue details are always same.
        # both lists alternate [timestamp, count, timestamp, count, ...],
        # so step by 2 and read i as the key, i+1 as the count
        # (missing counts come back falsy and default to 0)
        for i in xrange(0, len(enqueue_details), 2):
            enqueue_counts[str(enqueue_details[i])] = int(
                enqueue_details[i + 1] or 0)
            dequeue_counts[str(dequeue_details[i])] = int(
                dequeue_details[i + 1] or 0)

        response.update({
            'status': 'success',
            'queue_types': list(all_queue_types),
            'enqueue_counts': enqueue_counts,
            'dequeue_counts': dequeue_counts
        })
        return response
    elif queue_type and not queue_id:
        # return list of queue_ids.
        # get data from two sorted sets in a transaction
        pipe = self._r.pipeline()
        pipe.zrange('%s:%s' % (self._key_prefix, queue_type), 0, -1)
        pipe.zrange('%s:%s:active' % (self._key_prefix, queue_type), 0, -1)
        ready_queues, active_queues = pipe.execute()
        # extract the queue_ids from the queue_id:job_id string
        active_queues = [i.split(':')[0] for i in active_queues]
        all_queue_set = set(ready_queues) | set(active_queues)
        response.update({
            'status': 'success',
            'queue_ids': list(all_queue_set)
        })
        return response
    elif queue_type and queue_id:
        # return specific details.
        # NOTE(review): these two smembers calls mirror the global
        # branch but their union is never used here — presumably
        # leftover; confirm before removing.
        active_queue_types = self._r.smembers(
            '%s:active:queue_type' % self._key_prefix)
        ready_queue_types = self._r.smembers(
            '%s:ready:queue_type' % self._key_prefix)
        all_queue_types = active_queue_types | ready_queue_types
        # queue specific rates for past 10 minutes
        timestamp = str(generate_epoch())
        keys = [
            '%s:%s:%s' % (self._key_prefix, queue_type, queue_id)
        ]
        args = [
            timestamp
        ]
        enqueue_details, dequeue_details = self._lua_metrics(
            keys=keys, args=args)

        enqueue_counts = {}
        dequeue_counts = {}
        # the length of enqueue & dequeue details are always same.
        # same alternating [timestamp, count, ...] layout as the
        # global branch above
        for i in xrange(0, len(enqueue_details), 2):
            enqueue_counts[str(enqueue_details[i])] = int(
                enqueue_details[i + 1] or 0)
            dequeue_counts[str(dequeue_details[i])] = int(
                dequeue_details[i + 1] or 0)

        # get the queue length for the job queue
        queue_length = self._r.llen('%s:%s:%s' % (
            self._key_prefix, queue_type, queue_id))

        response.update({
            'status': 'success',
            'queue_length': int(queue_length),
            'enqueue_counts': enqueue_counts,
            'dequeue_counts': dequeue_counts
        })
        return response
    elif not queue_type and queue_id:
        raise BadArgumentException(
            '`queue_id` should be accompanied by `queue_type`.')

    # unreachable in practice, but keeps the failure default as a
    # safety net
    return response