def handle(self, *args, **options):
    # get a list of all Topics
    queues = {}
    topics = Topic.objects.all()
    for topic in topics:
        queues[topic.topic] = topic.id

    # `servers` is expected to be defined at module level
    c = Client(servers)
    c.connect()

    # listen forever
    print(queues.keys())
    while True:
        jobs = c.get_job(list(queues.keys()))
        for queue_name, job_id, job in jobs:
            try:
                topic = Topic.objects.get(topic=queue_name)
                new_message = Message(topic=topic, body=job,
                                      ack_date=timezone.now())
                new_message.save()
                c.ack_job(job_id)
            except Exception:
                # requeue the job if anything went wrong
                c.nack_job(job_id)
        time.sleep(1)
def main():
    """Start the poor_consumer."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h:v",
                                   ["help", "nack=", "servers=",
                                    "queues="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit()

    # defaults
    nack = 0.0
    verbose = False
    servers = "localhost:7712,localhost:7711"
    queues = "test"

    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o == "--nack":
            nack = float(a)
        elif o == "--servers":
            servers = a
        elif o == "--queues":
            queues = a
        else:
            assert False, "unhandled option"

    # prepare servers and queues for pydisque
    servers = servers.split(",")
    queues = queues.split(",")

    c = Client(servers)
    c.connect()

    while True:
        jobs = c.get_job(queues)
        for queue_name, job_id, job in jobs:
            rnd = random.random()
            # as this is a test processor, we don't do any validation on
            # the actual job body, so let's just pay attention to ids
            if rnd >= nack:
                print(">>> received job:", job_id)
                c.ack_job(job_id)
            else:
                print(">>> bouncing job:", job_id)
                c.nack_job(job_id)
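# Example invocation of the consumer above, built from the options it
# actually parses (the script filename is hypothetical):
#
#     python poor_consumer.py --nack=0.1 \
#         --servers=localhost:7712,localhost:7711 --queues=test,jobs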
class Queue:
    def __init__(self, conf):
        self.conf = conf
        self.client = Client([':'.join([self.host, str(self.port)])])
        self.client.connect()

    @property
    def host(self):
        return self.conf.get('host') or 'localhost'

    @property
    def port(self):
        return self.conf.get('port') or 7711

    @property
    def queue(self):
        return self.conf.get('queue') or 'downloader'

    def get(self):
        return self.client.get_job([self.queue])

    def add(self, job):
        return self.client.add_job(self.queue, json.dumps(job),
                                   timeout=1000)

    def ack(self, job_id):
        return self.client.ack_job(job_id)
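# A minimal usage sketch for the Queue wrapper above, assuming a running
# Disque node on the default host and port; the config values and the job
# payload are made up for illustration:
queue = Queue({'host': 'localhost', 'port': 7711, 'queue': 'downloader'})
queue.add({'url': 'http://example.com/file.bin'})  # JSON-encoded by add()
for queue_name, job_id, job in queue.get():
    queue.ack(job_id)  # acknowledge once the job has been processed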
class TestDisque(unittest.TestCase):
    """TestCase class for pydisque."""

    testID = None

    def setUp(self):
        """Setup the tests."""
        self.client = Client(['localhost:7711'])
        self.client.connect()
        self.testID = "%d.%d" % (time.time(),
                                 random.randint(1000, 1000000))

    def test_publish_and_receive(self):
        """Test the most important functions of pydisque."""
        t1 = str(time.time())
        self.client.add_job("test_q", t1, timeout=100)
        jobs = self.client.get_job(['test_q'])
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.ack_job(job_id)
        assert len(self.client.get_job(['test_q'], timeout=100)) == 0

    def test_nack(self):
        """Fetch the queue, return a job, check that it's back."""
        t1 = str(time.time())
        queuename = "test_nack." + self.testID
        self.client.add_job(queuename, str(t1), timeout=100)
        jobs = self.client.get_job([queuename])
        # NACK the first read
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.nack_job(job_id)
        # this time ACK it
        jobs = self.client.get_job([queuename])
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.ack_job(job_id)
        assert len(self.client.get_job([queuename], timeout=100)) == 0

    def test_qpeek(self):
        """
        Test qpeek.

        Ran into some problems with an ENQUEUE/DEQUEUE test that was
        using qpeek, checking core functionality of qpeek().
        """
        queuename = "test_qpeek-%s" % self.testID
        job_id = self.client.add_job(queuename, "Peek A Boo")
        peeked = self.client.qpeek(queuename, 1)
        assert peeked[0][1] == job_id

    def test_qscan(self):
        """
        Test the qscan function.

        This test relies on add_job() being functional, and the local
        disque not being a disque proxy to a mesh.

        TODO: unique the queues with self.testID.
        """
        t1 = str(time.time())
        self.client.add_job("q1", t1, timeout=100)
        self.client.add_job("q2", t1, timeout=100)
        qb = self.client.qscan()
        assert qb[0]
        assert qb[1]
        assert six.b("q1") in qb[1]
        assert six.b("q2") in qb[1]

    def test_jscan(self):
        """Simple test of the jscan function."""
        t1 = time.time()
        queuename = "test_jscan-%s" % self.testID
        j1 = self.client.add_job(queuename, str(t1), timeout=100)
        jerbs = self.client.jscan(queue=queuename)
        assert j1 in jerbs[1]

    def test_del_job(self):
        """Simple test of del_job, needs qpeek.

        FIXME: This function has grown ugly.
        """
        t1 = time.time()
        queuename = "test_del_job-%s" % self.testID
        j1 = self.client.add_job(queuename, str(t1))
        jerbs = self.client.qpeek(queuename, 1)
        jlist = [item[1] for item in jerbs]
        assert j1 in jlist
        self.client.del_job(j1)
        jerbs = self.client.qpeek(queuename, 1)
        jlist = [item[1] for item in jerbs]
        assert j1 not in jlist

    def test_qlen(self):
        """Simple test of qlen."""
        queuename = "test_qlen-%s" % self.testID
        lengthOfTest = 100
        test_job = "Useless Job."
        for x in range(lengthOfTest):
            self.client.add_job(queuename, test_job)
        assert self.client.qlen(queuename) == lengthOfTest

    def test_qstat(self):
        """Testing QSTAT (default behavior)."""
        queuename = "test_qstat-%s" % self.testID
        testqueue = ["a", "b", "c"]
        for x in testqueue:
            self.client.add_job(queuename, x)
        stat = self.client.qstat(queuename)
        # check the basics
        assert 'jobs-in' in stat
        assert 'jobs-out' in stat

    def test_qstat_dict(self):
        """Testing QSTAT (new dict behavior)."""
        queuename = "test_qstat_dict-%s" % self.testID
        testqueue = ["a", "b", "c"]
        for x in testqueue:
            self.client.add_job(queuename, x)
        stat = self.client.qstat(queuename, True)
        assert stat.get('jobs-in', None) is not None
        assert stat.get('jobs-out', None) is not None

    def test_shownack(self):
        """Test that NACK and SHOW work appropriately."""
        queuename = "test_show-%s" % self.testID
        test_job = "Show me."
        self.client.add_job(queuename, test_job)
        jobs = self.client.get_job([queuename])
        for queue_name, job_id, job in jobs:
            self.client.nack_job(job_id)
        shown = self.client.show(job_id, True)
        assert shown.get('body') == test_job
        assert shown.get('nacks') == 1

    def test_pause(self):
        """Test that a PAUSE message is acknowledged."""
        queuename = "test_show-%s" % self.testID
        test_job = "Jerbs, they are a thing"
        self.client.pause(queuename, kw_in=True)
        try:
            job_id = self.client.add_job(queuename, test_job)
        except ResponseError:
            pass
        # can we add a job again?
        self.client.pause(queuename, kw_none=True)
        job_id = self.client.add_job(queuename, test_job)
        jobs = self.client.get_job([queuename])
        # TODO(canardleteer): add a test of PAUSE SHOW

    def test_get_job(self):
        queue_name = "test_get_job." + self.testID
        job = str(time.time())
        job_id = self.client.add_job(queue_name, job)
        expected = [(queue_name, job_id, job)]
        got = self.client.get_job([queue_name], withcounters=False)
        assert expected == got

    def test_get_job_withcounters(self):
        queue_name = "test_get_job." + self.testID
        job = str(time.time())
        job_id = self.client.add_job(queue_name, job)
        nacks = 0
        additional_deliveries = 0
        expected = [(queue_name, job_id, job,
                     nacks, additional_deliveries)]
        got = self.client.get_job([queue_name], withcounters=True)
        assert expected == got
def ack_job(request):
    user = request.user
    ip = request.META['REMOTE_ADDR']
    zone = request.POST['zone']
    jobIds = request.POST.getlist('jobIds', [])

    if not user.groups.filter(name__in=['admin', 'dba', 'disque']).exists():
        logs(user, ip, 'ack job: %s , zone: %s' % (jobIds, zone),
             'permission denied')
        return HttpResponse(json.dumps({'errcode': 403}),
                            content_type=DEFAULT_CONTENT_TYPE)

    try:
        clusterInfo = ClusterInfo.objects.get(name=zone)
        print(clusterInfo.addr)
    except ClusterInfo.DoesNotExist:
        logs(user, ip, 'ack job: %s' % jobIds,
             'unknown disque zone: %s' % zone)
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'unknown disque zone:%s' % zone
        }), content_type=DEFAULT_CONTENT_TYPE)
    except ClusterInfo.MultipleObjectsReturned:
        logs(user, ip, 'ack job: %s' % jobIds,
             'multi objects returned for zone: %s' % zone)
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'multi objects returned for zone:%s' % zone
        }), content_type=DEFAULT_CONTENT_TYPE)
    except Exception as e:
        print(e)
        logs(user, ip, 'ack job: %s , zone: %s' % (jobIds, zone), str(e))
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': str(e)
        }), content_type=DEFAULT_CONTENT_TYPE)

    if len(jobIds) == 0:
        logs(user, ip, 'ack job: zone-%s' % zone, 'empty jobIds')
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': 'empty jobIds'
        }), content_type=DEFAULT_CONTENT_TYPE)

    jobIds = [x.encode('utf-8') for x in jobIds]
    print(user, zone, jobIds)

    try:
        addr = clusterInfo.addr.split(',')
        client = Client(addr)
        client.connect()
        client.ack_job(*jobIds)
    except Exception as e:
        print(e)
        logs(user, ip, 'ack job: %s , zone: %s' % (jobIds, zone), str(e))
        return HttpResponse(json.dumps({
            'errcode': 400,
            'msg': str(e)
        }), content_type=DEFAULT_CONTENT_TYPE)

    logs(user, ip, 'ack job: %s , zone: %s' % (jobIds, zone), 'success')
    return HttpResponse(json.dumps({'errcode': 200}),
                        content_type=DEFAULT_CONTENT_TYPE)
def main():
    """Primary CLI application logic."""
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], "h:v",
            ["help", "dservers=", "dqueue=", "secret=", "bserver=",
             "hserver=", "rserver=", "rchannel=", "bfiltername=",
             "hllname=", "mode=", "sleep="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit()

    modes = ("generate", "listen", "check", "adaptive", "initialize",
             "subscriber")

    # set defaults
    mode = None
    dservers = "localhost:7712,localhost:7711"
    dqueue = "objbomber"
    secret = "coolsecretbro"
    bserver = "localhost:8673"
    hserver = "localhost:4553"
    rserver = "localhost:6379"
    rchannel = "objbomber"
    bfiltername = "objbomber"
    hllname = "objbomber"
    sleep = None
    userhomedir = os.path.expanduser("~")

    # flippin' switches...
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o == "--dservers":
            dservers = a
        elif o == "--dqueue":
            dqueue = a
        elif o == "--secret":
            secret = a
        elif o == "--bserver":
            bserver = a
        elif o == "--hserver":
            hserver = a
        elif o == "--rserver":
            rserver = a
        elif o == "--rchannel":
            rchannel = a
        elif o == "--bfiltername":
            bfiltername = a
        elif o == "--hllname":
            hllname = a
        elif o == "--mode":
            if a in modes:
                mode = a
            else:
                usage()
                sys.exit()
        elif o == "--sleep":
            sleep = int(a)
        else:
            assert False, "unhandled option"

    checkdqueue = dqueue + ".check"

    if sleep in (None, 0):
        sleep = 0.0001

    # mode must be set
    if not mode:
        usage()
        sys.exit()

    # Handler for the cryptographic signatures
    # TODO: secret should be "secret" + a version number
    s = Serializer(secret)

    # config basics
    datadir = userhomedir + "/.objbomber"

    # prepare servers and queue lists
    dservers = dservers.split(",")
    bserver = [bserver]

    # all modes use Disque
    logging.info("Connecting to Disque...")
    disque_client = Client(dservers)
    disque_client.connect()

    if mode in ("check", "listen"):
        logging.info("Creating Bloomd Client...")
        bloomd_client = BloomdClient(bserver)
        bfilter = bloomd_client.create_filter(bfiltername)
        # add pyhlld
        logging.info("Creating HLLD Client... - not yet used")
        hlld_client = HlldClient(hserver)
        hll = hlld_client.create_set(hllname)

    if mode in ("check", "listen", "generate", "subscriber"):
        # add redis hll & pubsub
        logging.info("Creating Redis Client...")
        rhost, rport = rserver.split(":")
        redd = redis.StrictRedis(host=rhost, port=rport, db=0)
        redpubsub = redd.pubsub()

    if mode == "subscriber":
        redpubsub.subscribe(rchannel)

    if mode == "generate":
        # TODO: check on how well LevelDB handles multiple clients
        db = leveldb.LevelDB(datadir + '/notary')

    # special mode to handle our first run
    # TODO: push into a function
    # TODO: handle filesystem errors
    # TODO: reconsider using Cement for all of this
    # TODO: generate an instance GUID
    if mode == "initialize":
        UUID = uuid.uuid4()
        logging.info("Our system UUID is now: %s" % UUID)
        # TODO: save and load this uuid

        # check to see if there is a ~/.objbomber directory,
        # quit if there is
        # TODO: this does not handle errors in initialization
        logging.info("Checking for .objbomber in %s..." % userhomedir)
        if os.path.exists(datadir):
            logging.info("Already been initialized!")
            # TODO: print some information about how to handle this
            sys.exit()
        # TODO: make one
        os.mkdir(datadir, 0o700)

        # generate our RSA signing key
        # TODO: make # of bits in key a flag
        logging.info("Beginning to create our notary key.")
        logging.info("Reading from RNG.")
        rng = Random.new().read
        logging.info("Generating RSA key...")
        privRSAkey = RSA.generate(4096, rng)
        privPEM = privRSAkey.exportKey()
        pubRSAkey = privRSAkey.publickey()
        pubPEM = pubRSAkey.exportKey()
        logging.info("Key generated.")

        # save privkey to disk
        with open(datadir + "/privkey.pem", "w") as keyfile:
            keyfile.write(privPEM)
        os.chmod(datadir + "/privkey.pem", 0o700)
        logging.info("Unencrypted RSA key written to disk.")

        # save the pubkey
        with open(datadir + "/pubkey.pem", "w") as keyfile:
            keyfile.write(pubPEM)
        logging.info("Public RSA key written to disk.")

        logging.info("Creating crypto notary storage.")
        leveldb.LevelDB(datadir + '/notary')

        # we absolutely must quit here, or we will get stuck in
        # an infinite loop
        sys.exit()

    # load our secret key (TODO: this is probably better as try/exc)
    # and build our contexts
    with open(datadir + "/privkey.pem", "r") as keyfile:
        privRSAkey = RSA.importKey(keyfile.read())

    while True:
        # TODO: Adaptive Mode - this mode should peek the queues, and
        # make a decision about where this thread can make the most
        # impact on its own.
        if mode == "adaptive":
            # TODO: Do some queue peeking.
            # TODO: Make some decisions about which mode to adapt to.
            pass

        # TODO: All modes should be placed into functions.

        # Listen Mode - Listens to the queue, pulls out jobs,
        # validates the signature, puts them in bloomd
        if mode == "listen":
            logging.info("Getting Jobs from Disque.")
            jobs = disque_client.get_job([dqueue])
            print("Got %d jobs." % len(jobs))
            for queue_name, job_id, job in jobs:
                logging.debug("Handling a job: %s" % job)
                try:
                    job = s.loads(job)
                    logging.debug("Job Authenticated: %s" % job)
                except Exception:
                    logging.warning("Job did not pass authentication.")
                    disque_client.nack_job(job_id)
                    continue
                # add to bloom filter
                try:
                    bfilter.add(job)
                except Exception:
                    logging.warning("Job was not added to bloomd.")
                    disque_client.nack_job(job_id)
                    continue
                try:
                    hllResponse = hll.add(job)
                except Exception:
                    logging.warning("Job was not added to hlld.")
                    disque_client.nack_job(job_id)
                    continue
                # TODO: add redis HLL support
                # tell disque that this job has been processed
                disque_client.ack_job(job_id)
                # sign the check job
                job = s.dumps(job)
                # throw this message on the check queue
                disque_client.add_job(checkdqueue, job)

        elif mode == "check":
            # Check the secondary disque queue for checks, and ask the
            # bloom filter if it has seen each one.
            logging.info("Getting Jobs from Disque.")
            jobs = disque_client.get_job([checkdqueue])
            for queue_name, job_id, job in jobs:
                logging.debug("Checking: %s" % job)
                try:
                    job = s.loads(job)
                except Exception:
                    disque_client.nack_job(job_id)
                    continue
                # we don't NACK on failed cache hits
                if job in bfilter:
                    logging.info("Confirming: %s" % job)
                else:
                    logging.info("Not found in bloom filter: %s" % job)
                disque_client.ack_job(job_id)

        elif mode == "generate":
            # TODO - where will these messages come from? for now they
            # will just be random numbers, but really we should make
            # them objects to really be testing serialization
            msg = [random.randint(1000, 1000000),
                   random.randint(1000, 1000000)]
            # itsdangerous serialization & signing
            msg = s.dumps(msg)

            # Now that this message is serialized, we can sign it again
            # with a public key.
            # TODO: incorporate saving the signature into the notary
            # records
            msghash = SHA.new(msg)
            signer = PKCS1_v1_5.new(privRSAkey)
            signature = signer.sign(msghash)
            assert signer.verify(msghash, signature)
            record = {'message': msg, 'signature': signature}
            record = pickle.dumps(record)

            # send the job over to Disque
            # TODO: add more command line flags for queuing
            job_id = disque_client.add_job(dqueue, msg)
            logging.debug("Added a job to Disque: %s" % msg)

            # publish just the signature on redis pubsub
            redd.publish(rchannel, signature)

            # TODO: save the publication in the notary
            # TODO: do more than just save the signatures
            # TODO: add a GUID to the key
            key = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")
            db.Put(key, record)

            # testing the results of leveldb's store; this is an
            # expensive test
            sig2 = db.Get(key)
            sig2 = pickle.loads(sig2)['signature']
            assert signer.verify(msghash, sig2)

        elif mode == "subscriber":
            msg = redpubsub.get_message()
            # TODO: do something useful, like log
            if msg:
                print("got a message")

        time.sleep(sleep)
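# Example invocations of the modes defined above (the script filename is
# hypothetical). "initialize" must run once, on its own, before any of
# the looping modes:
#
#     python objbomber.py --mode=initialize
#     python objbomber.py --mode=generate --dservers=localhost:7711
#     python objbomber.py --mode=listen --bserver=localhost:8673
#     python objbomber.py --mode=check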
class TestDisque(unittest.TestCase):
    """TestCase class for pydisque."""

    testID = None

    def setUp(self):
        """Setup the tests."""
        self.client = Client(['localhost:7711'])
        self.client.connect()
        self.testID = "%d.%d" % (time.time(),
                                 random.randint(1000, 1000000))

    def test_publish_and_receive(self):
        """Test the most important functions of pydisque."""
        t1 = str(time.time())
        self.client.add_job("test_q", t1, timeout=100)
        jobs = self.client.get_job(['test_q'])
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.ack_job(job_id)
        assert len(self.client.get_job(['test_q'], timeout=100)) == 0

    def test_nack(self):
        """Fetch the queue, return a job, check that it's back."""
        t1 = str(time.time())
        queuename = "test_nack." + self.testID
        self.client.add_job(queuename, str(t1), timeout=100)
        jobs = self.client.get_job([queuename])
        # NACK the first read
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.nack_job(job_id)
        # this time ACK it
        jobs = self.client.get_job([queuename])
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.ack_job(job_id)
        assert len(self.client.get_job([queuename], timeout=100)) == 0

    def test_qpeek(self):
        """
        Test qpeek.

        Ran into some problems with an ENQUEUE/DEQUEUE test that was
        using qpeek, checking core functionality of qpeek().
        """
        queuename = "test_qpeek-%s" % self.testID
        job_id = self.client.add_job(queuename, "Peek A Boo")
        peeked = self.client.qpeek(queuename, 1)
        assert peeked[0][1] == job_id

    def test_qscan(self):
        """
        Test the qscan function.

        This test relies on add_job() being functional, and the local
        disque not being a disque proxy to a mesh.

        TODO: unique the queues with self.testID.
        """
        t1 = str(time.time())
        self.client.add_job("q1", t1, timeout=100)
        self.client.add_job("q2", t1, timeout=100)
        qb = self.client.qscan()
        assert qb[0]
        assert qb[1]
        assert six.b("q1") in qb[1]
        assert six.b("q2") in qb[1]

    def test_jscan(self):
        """Simple test of the jscan function."""
        t1 = time.time()
        queuename = "test_jscan-%s" % self.testID
        j1 = self.client.add_job(queuename, str(t1), timeout=100)
        jerbs = self.client.jscan(queue=queuename)
        assert j1 in jerbs[1]

    def test_del_job(self):
        """Simple test of del_job, needs qpeek.

        FIXME: This function has grown ugly.
        """
        t1 = time.time()
        queuename = "test_del_job-%s" % self.testID
        j1 = self.client.add_job(queuename, str(t1))
        jerbs = self.client.qpeek(queuename, 1)
        jlist = [item[1] for item in jerbs]
        assert j1 in jlist
        self.client.del_job(j1)
        jerbs = self.client.qpeek(queuename, 1)
        jlist = [item[1] for item in jerbs]
        assert j1 not in jlist

    def test_qlen(self):
        """Simple test of qlen."""
        queuename = "test_qlen-%s" % self.testID
        lengthOfTest = 100
        test_job = "Useless Job."
        for x in range(lengthOfTest):
            self.client.add_job(queuename, test_job)
        assert self.client.qlen(queuename) == lengthOfTest

    def test_qstat(self):
        """Testing QSTAT (default behavior)."""
        queuename = "test_qstat-%s" % self.testID
        testqueue = ["a", "b", "c"]
        for x in testqueue:
            self.client.add_job(queuename, x)
        stat = self.client.qstat(queuename)
        # check the basics
        assert b'jobs-in' in stat
        assert b'jobs-out' in stat

    def test_qstat_dict(self):
        """Testing QSTAT (new dict behavior)."""
        queuename = "test_qstat_dict-%s" % self.testID
        testqueue = ["a", "b", "c"]
        for x in testqueue:
            self.client.add_job(queuename, x)
        stat = self.client.qstat(queuename, True)
        assert stat.get(b'jobs-in', None) is not None
        assert stat.get(b'jobs-out', None) is not None

    def test_shownack(self):
        """Test that NACK and SHOW work appropriately."""
        queuename = "test_show-%s" % self.testID
        test_job = "Show me."
        self.client.add_job(queuename, test_job)
        jobs = self.client.get_job([queuename])
        for queue_name, job_id, job in jobs:
            self.client.nack_job(job_id)
        shown = self.client.show(job_id, True)
        assert shown.get(b'body') == b(test_job)
        assert shown.get(b'nacks') == 1

    def _test_pause(self):
        """TODO: Test that a PAUSE message is acknowledged."""
        queuename = "test_show-%s" % self.testID
        test_job = "Jerbs, they are a thing"
        self.client.pause(queuename, kw_in=True)
        try:
            job_id = self.client.add_job(queuename, test_job)
        except ResponseError:
            pass
        # can we add a job again?
        self.client.pause(queuename, kw_none=True)
        job_id = self.client.add_job(queuename, test_job)
        jobs = self.client.get_job([queuename])
        # TODO(canardleteer): add a test of PAUSE SHOW

    def test_get_job(self):
        queue_name = "test_get_job." + self.testID
        job = str(time.time())
        job_id = self.client.add_job(queue_name, job)
        expected = [(b(queue_name), job_id, b(job))]
        got = self.client.get_job([queue_name], withcounters=False)
        assert expected == got

    def test_get_job_withcounters(self):
        queue_name = "test_get_job." + self.testID
        job = str(time.time())
        job_id = self.client.add_job(queue_name, job)
        nacks = 0
        additional_deliveries = 0
        expected = [(b(queue_name), job_id, b(job),
                     nacks, additional_deliveries)]
        got = self.client.get_job([queue_name], withcounters=True)
        assert expected == got
class TestDisque(unittest.TestCase):
    """TestCase class for pydisque."""

    def setUp(self):
        """Setup the tests."""
        self.client = Client(['localhost:7711'])
        self.client.connect()

    def test_publish_and_receive(self):
        """Test the most important functions of pydisque."""
        t1 = str(time.time())
        self.client.add_job("test_q", t1, timeout=100)
        jobs = self.client.get_job(['test_q'])
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.ack_job(job_id)
        assert len(self.client.get_job(['test_q'], timeout=100)) == 0

    def test_nack(self):
        """Fetch the queue, return a job, check that it's back."""
        t1 = str(time.time())
        self.client.add_job("test_nack_q", str(t1), timeout=100)
        jobs = self.client.get_job(['test_nack_q'])
        # NACK the first read
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.nack_job(job_id)
        # this time ACK it
        jobs = self.client.get_job(['test_nack_q'])
        assert len(jobs) == 1
        for queue_name, job_id, job in jobs:
            assert job == six.b(t1)
            self.client.ack_job(job_id)
        assert len(self.client.get_job(['test_nack_q'], timeout=100)) == 0

    def test_qscan(self):
        """
        Test the qscan function.

        This test relies on add_job() being functional, and the local
        disque not being a disque proxy to a mesh.
        """
        t1 = str(time.time())
        qa = self.client.qscan()
        # print "Cursor: %s Jobs: %s" % (qa[0], qa[1])
        self.client.add_job("q1", t1, timeout=100)
        self.client.add_job("q2", t1, timeout=100)
        qb = self.client.qscan()
        # print "Cursor: %s Jobs: %s" % (qb[0], qb[1])
        assert qb[0]
        assert qb[1]
        # i am thinking we need some kind of master 'clear queue'
        # command in disque, hopefully not just for the purposes of
        # making this unit test more effective...
        assert six.b("q1") in qb[1]
        assert six.b("q2") in qb[1]

    def test_jscan(self):
        """Simple test of the jscan function."""
        t1 = time.time()
        queuename = "test_jscan-%d" % random.randint(1000, 1000000)
        j1 = self.client.add_job(queuename, str(t1), timeout=100)
        jerbs = self.client.jscan(queue=queuename)
        assert j1 in jerbs[1]

    def test_del_job(self):
        """Simple test of del_job, needs jscan."""
        t1 = time.time()
        queuename = "test_del_job-%d" % random.randint(1000, 1000000)
        j1 = self.client.add_job(queuename, str(t1))
        jerbs = self.client.jscan(queue=queuename)
        assert j1 in jerbs[1]
        self.client.del_job(j1)
        jerbs = self.client.jscan(queue=queuename)
        assert j1 not in jerbs[1]

    def test_qlen(self):
        """Simple test of qlen."""
        queuename = "test_qlen-%d" % random.randint(1000, 1000000)
        lengthOfTest = 100
        test_job = "Useless Job."
        for x in range(lengthOfTest):
            self.client.add_job(queuename, test_job)
        assert self.client.qlen(queuename) == lengthOfTest
parser.add_argument("-i", "--id", type=str, required=True,
                    help="Module ID.")
args = parser.parse_args()

module_name, module_id = args.id.split('_')

runtime = json.load(open(args.runtime, 'r'))
pipeline = json.load(open(args.pipeline, 'r'))

disque_client = Client(runtime["Disque_Default"])
disque_client.connect()

# LOGGING
# publisher.info(module_name + ": started to receive & publish.")

nb = 0
while True:
    messages = receive(disque_client, pipeline[module_name]["source-queue"])
    if len(messages) > 0:
        publisher.debug(module_name + ': Got a message')
        for qname, job_id, payload in messages:
            send(disque_client, pipeline[module_name]["destination-queues"],
                 payload)
            disque_client.ack_job(job_id)
            nb += 1
            if nb % 100 == 0:
                publisher.info('{} ({}): {} messages processed, {} to go.'.format(
                    module_name, module_id, nb, disque_client.qlen(qname)))
    else:
        publisher.debug(module_name + ": Empty Queues: Waiting...")
        time.sleep(1)
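# The loop above calls receive() and send() without defining them. A
# plausible minimal sketch of those helpers, assuming they are thin
# wrappers over pydisque's get_job()/add_job(); this is a guess at their
# contract, not the original implementations:
def receive(client, source_queue):
    """Fetch any pending jobs from the module's source queue."""
    return client.get_job([source_queue], timeout=100)


def send(client, destination_queues, payload):
    """Fan the payload out to every destination queue."""
    for destination_queue in destination_queues:
        client.add_job(destination_queue, payload)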
import json
import time
import logging

logging.basicConfig(level=logging.DEBUG)

from pydisque.client import Client

c = Client(['localhost:7712', 'localhost:7711'])
c.connect()

while True:
    jobs = c.get_job(['test'])
    for queue_name, job_id, job in jobs:
        job = json.loads(job)
        print(">>> received job:", job)
        c.ack_job(job_id)
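# A matching producer sketch for the consumer above: the queue name
# 'test' and the JSON-encoded body are what the consumer expects; the
# payload content itself is made up:
import json
from pydisque.client import Client

producer = Client(['localhost:7712', 'localhost:7711'])
producer.connect()
producer.add_job('test', json.dumps({'hello': 'world'}), timeout=100)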
def main():
    """The job listening loop."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h:v",
                                   ["help", "servers=", "queues="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit()

    # defaults
    servers = "localhost:7712,localhost:7711"
    queues = "twitpush"

    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o == "--servers":
            servers = a
        elif o == "--queues":
            queues = a
        else:
            assert False, "unhandled option"

    # prepare servers and queues for pydisque
    servers = servers.split(",")
    queues = queues.split(",")

    c = Client(servers)
    c.connect()

    # prepare our tweepy instance
    tweepy_auth = tweepy.OAuthHandler(tweepy_consumer_key,
                                      tweepy_consumer_secret)
    tweepy_auth.set_access_token(tweepy_access_token,
                                 tweepy_access_token_secret)
    tweepy_api = tweepy.API(tweepy_auth)

    while True:
        jobs = c.get_job(queues)
        for queue_name, job_id, job in jobs:
            try:
                ts, command, payload = pickle.loads(job)
                if command == "UPDATE":
                    tweepy_api.update_status(payload)
                    print("tweepy_api.update_status(%s)" % payload)
                    c.ack_job(job_id)
                else:
                    # we don't know what to do with this command
                    c.nack_job(job_id)
            except Exception as e:
                # we shouldn't nack jobs if they shouldn't be requeued
                if 'message' in e.args[0][0]:
                    if e.args[0][0]['message'] == 'Status is a duplicate.':
                        c.ack_job(job_id)
                        continue
                c.nack_job(job_id)
                # exiting for now to avoid slamming the Twitter API
                print(e)
                sys.exit()
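# A minimal producer sketch for the listener above. The pickled
# (ts, command, payload) tuple and the "UPDATE" command are what the
# consumer unpacks; the status text is made up:
import pickle
import time
from pydisque.client import Client

c = Client(["localhost:7712", "localhost:7711"])
c.connect()
job = pickle.dumps((time.time(), "UPDATE", "hello from disque"))
c.add_job("twitpush", job)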