def read_message(self):
    """Fetch a batch of messages from SQS and enqueue them internally.

    Once the batch has been held longer than the visibility timeout the
    remaining messages are dropped, since SQS has already made them
    visible again for another reader.
    """
    messages = self.sqs_queue.get_messages(
        MESSAGE_DOWNLOAD_BATCH_SIZE,
        wait_time_seconds=LONG_POLLING_INTERVAL,
    )
    logger.info("Successfully got {} messages from SQS queue {}".format(
        len(messages), self.sqs_queue.name))  # noqa
    batch_start = time.time()
    for message in messages:
        now = time.time()
        if int(now - batch_start) >= self.visibility_timeout:
            # Don't add any more messages since they have re-appeared in
            # the sqs queue; instead just reset and get fresh messages
            # from the sqs queue.
            msg = "Clearing Local messages since we exceeded their visibility_timeout"
            logger.warning(msg)
            break
        message_body = decode_message(message)
        # Dict construction cannot raise Full, so it lives outside the try.
        packed_message = {
            "queue": self.sqs_queue.id,
            "message": message,
            "start_time": batch_start,
            "timeout": self.visibility_timeout,
        }
        try:
            self.internal_queue.put(packed_message, True, self.visibility_timeout)
        except Full:
            msg = "Timed out trying to add the following message to the internal queue after {} seconds: {}".format(
                self.visibility_timeout, message_body)  # noqa
            logger.warning(msg)
            continue
        else:
            logger.debug(
                "Message successfully added to internal queue from SQS queue {} with body: {}"
                .format(self.sqs_queue.name, message_body))  # noqa
def test_worker_fills_internal_queue_from_celery_task():
    """
    Test read workers fill internal queue with celery tasks
    """
    sqs_client = boto3.client('sqs', region_name='us-east-1')
    queue_url = sqs_client.create_queue(QueueName="tester")['QueueUrl']

    # Celery-style payload: a pickled, base64-encoded body wrapped in JSON.
    celery_payload = ('{"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfa'
                      'W5jcmVtZW50ZXInCnAyCnNTJ2Fy\\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJw'
                      'pwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\\nZ2UyJwpwOAp'
                      'zcy4=\\n", "some stuff": "asdfasf"}')
    sqs_client.send_message(QueueUrl=queue_url, MessageBody=celery_payload)

    internal_queue = Queue()
    worker = ReadWorker(queue_url, internal_queue, BATCHSIZE, parent_id=1)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message2',
        },
    })
def test_worker_fills_internal_queue():
    """
    Test read workers fill internal queue
    """
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Single expected task body, used both to seed SQS and to verify.
    expected_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    }
    message = Message()
    message.set_body(json.dumps(expected_body))
    queue.write(message)

    internal_queue = Queue()
    worker = ReadWorker(queue, internal_queue, BATCHSIZE)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal(expected_body)
def process_message(self):
    """Pop one packed message off the internal queue and run its task.

    Returns:
        False when the internal queue was empty (nothing attempted);
        True in every other case -- the task ran (successfully or not),
        or the message was discarded for exceeding its visibility timeout.
    """
    try:
        packed_message = self.internal_queue.get(timeout=0.5)
    except Empty:
        # Return False if we did not attempt to process any messages
        return False
    message = packed_message['message']
    queue_id = packed_message['queue']
    fetch_time = packed_message['start_time']
    timeout = packed_message['timeout']
    message_body = decode_message(message)
    full_task_path = message_body['task']
    args = message_body['args']
    kwargs = message_body['kwargs']

    # Resolve "pkg.module.func" into a callable.
    task_name = full_task_path.split(".")[-1]
    task_path = ".".join(full_task_path.split(".")[:-1])
    task_module = importlib.import_module(task_path)
    task = getattr(task_module, task_name)

    current_time = time.time()
    if int(current_time - fetch_time) >= timeout:
        # SQS has already re-queued this message; running it now would
        # duplicate work done by another worker.
        logger.warning(
            "Discarding task {} with args: {} and kwargs: {} due to exceeding visibility timeout"
            .format(  # noqa
                full_task_path,
                repr(args),
                repr(kwargs),
            ))
        return True
    try:
        # BUGFIX: time.clock() was deprecated since Python 3.3 and removed
        # in 3.8, and measured CPU time (not wall time) on Unix.  The log
        # message reports elapsed seconds, so wall-clock time.time() is
        # the correct measurement.
        start_time = time.time()
        task(*args, **kwargs)
    except Exception:
        end_time = time.time()
        logger.exception(
            "Task {} raised error in {:.4f} seconds: with args: {} and kwargs: {}: {}"
            .format(
                full_task_path,
                end_time - start_time,
                args,
                kwargs,
                traceback.format_exc(),
            ))
        return True
    else:
        end_time = time.time()
        # Only delete from SQS after the task ran without raising.
        params = {'ReceiptHandle': message.receipt_handle}
        self.conn.get_status('DeleteMessage', params, queue_id)
        logger.info(
            "Processed task {} in {:.4f} seconds with args: {} and kwargs: {}"
            .format(
                full_task_path,
                end_time - start_time,
                repr(args),
                repr(kwargs),
            ))
        return True
def test_worker_fills_internal_queue_from_celery_task():
    """
    Test read workers fill internal queue with celery tasks
    """
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Celery-style payload: pickled, base64-encoded body wrapped in JSON.
    message = Message()
    message.set_body('{"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfaW5jcmVtZW50ZXInCnAyCnNTJ2Fy\\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJwpwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\\nZ2UyJwpwOApzcy4=\\n", "some stuff": "asdfasf"}')  # noqa
    queue.write(message)

    internal_queue = Queue()
    worker = ReadWorker(queue, internal_queue, BATCHSIZE)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message2',
        },
    })
def test_worker_fills_internal_queue():
    """
    Test read workers fill internal queue
    """
    sqs_client = boto3.client('sqs', region_name='us-east-1')
    queue_url = sqs_client.create_queue(QueueName="tester")['QueueUrl']

    # The same task body is written to SQS and expected back out.
    task_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    }
    sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(task_body))

    internal_queue = Queue()
    worker = ReadWorker(queue_url, internal_queue, BATCHSIZE)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal(task_body)
def test_worker_fills_internal_queue_from_celery_task():
    """
    Test read workers fill internal queue with celery tasks
    """
    sqs_client = boto3.client('sqs', region_name='us-east-1')
    queue_url = sqs_client.create_queue(QueueName="tester")['QueueUrl']

    # Celery-style payload: pickled, base64-encoded body wrapped in JSON.
    celery_payload = (
        '{"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfa'
        'W5jcmVtZW50ZXInCnAyCnNTJ2Fy\\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJw'
        'pwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\\nZ2UyJwpwOAp'
        'zcy4=\\n", "some stuff": "asdfasf"}'
    )
    sqs_client.send_message(QueueUrl=queue_url, MessageBody=celery_payload)

    internal_queue = Queue()
    worker = ReadWorker(queue_url, internal_queue, BATCHSIZE)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message2',
        },
    })
def read_message(self):
    """Receive a batch of messages from SQS and enqueue them internally.

    Messages still unprocessed after the visibility timeout are dropped,
    since SQS has already re-exposed them to other readers.
    """
    response = self.conn.receive_message(
        QueueUrl=self.queue_url,
        MaxNumberOfMessages=self.batchsize,
        WaitTimeSeconds=LONG_POLLING_INTERVAL,
    )
    messages = response.get('Messages', [])
    logger.info("Successfully got {} messages from SQS queue {}".format(len(messages), self.queue_url))  # noqa
    batch_start = time.time()
    for message in messages:
        elapsed = time.time() - batch_start
        if int(elapsed) >= self.visibility_timeout:
            # Don't add any more messages since they have re-appeared in
            # the sqs queue; instead just reset and get fresh messages
            # from the sqs queue.
            logger.warning(
                "Clearing Local messages since we exceeded their visibility_timeout")
            break
        message_body = decode_message(message)
        packed_message = {
            "queue": self.queue_url,
            "message": message,
            "start_time": batch_start,
            "timeout": self.visibility_timeout,
        }
        try:
            self.internal_queue.put(packed_message, True, self.visibility_timeout)
        except Full:
            logger.warning("Timed out trying to add the following message to the internal queue after {} seconds: {}".format(self.visibility_timeout, message_body))  # noqa
            continue
        else:
            logger.debug("Message successfully added to internal queue from SQS queue {} with body: {}".format(self.queue_url, message_body))  # noqa
def test_worker_fills_internal_queue_from_celery_task():
    """
    Test read workers fill internal queue with celery tasks
    """
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Celery-style payload: pickled, base64-encoded body wrapped in JSON.
    message = Message()
    message.set_body('{"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfaW5jcmVtZW50ZXInCnAyCnNTJ2Fy\\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJwpwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\\nZ2UyJwpwOApzcy4=\\n", "some stuff": "asdfasf"}')  # noqa
    queue.write(message)

    internal_queue = Queue()
    worker = ReadWorker(queue, internal_queue)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message2',
        },
    })
def test_worker_fills_internal_queue():
    """
    Test read workers fill internal queue
    """
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Single expected task body, used both to seed SQS and to verify.
    expected_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    }
    message = Message()
    message.set_body(json.dumps(expected_body))
    queue.write(message)

    internal_queue = Queue()
    worker = ReadWorker(queue, internal_queue)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal(expected_body)
def test_worker_fills_internal_queue():
    """
    Test read workers fill internal queue
    """
    sqs_client = boto3.client('sqs', region_name='us-east-1')
    queue_url = sqs_client.create_queue(QueueName="tester")['QueueUrl']

    # The same task body is written to SQS and expected back out.
    task_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    }
    sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(task_body))

    internal_queue = Queue()
    worker = ReadWorker(queue_url, internal_queue, BATCHSIZE, parent_id=1)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal(task_body)
def read_message(self):
    """Drain up to 10 SQS messages into the internal queue.

    Each message is deleted from SQS only after it has been queued
    locally; when the internal queue is full the message is left on
    SQS to be received again later.
    """
    for message in self.queue.get_messages(10):
        decoded_body = decode_message(message)
        try:
            self.internal_queue.put_nowait(decoded_body)
        except Full:
            # Internal queue is saturated -- leave the message on SQS.
            continue
        message.delete()
def read_message(self):
    """Receive one SQS batch and push each message onto the internal queue.

    Messages held past the visibility timeout are dropped, because SQS
    has already made them visible to other readers again.
    """
    response = self.conn.receive_message(
        QueueUrl=self.queue_url,
        MaxNumberOfMessages=self.batchsize,
        WaitTimeSeconds=LONG_POLLING_INTERVAL,
    )
    messages = response.get('Messages', [])
    logger.debug(
        "Successfully got {} messages from SQS queue {}".format(
            len(messages), self.queue_url))  # noqa
    batch_start = time.time()
    for message in messages:
        elapsed = time.time() - batch_start
        if int(elapsed) >= self.visibility_timeout:
            # Don't add any more messages since they have re-appeared
            # in the sqs queue; instead just reset and get fresh
            # messages from the sqs queue.
            logger.warning(
                "Clearing Local messages since we exceeded "
                "their visibility_timeout"
            )
            break
        message_body = decode_message(message)
        packed_message = {
            "queue": self.queue_url,
            "message": message,
            "start_time": batch_start,
            "timeout": self.visibility_timeout,
        }
        try:
            self.internal_queue.put(
                packed_message, True, self.visibility_timeout)
        except Full:
            warning = (
                "Timed out trying to add the following message "
                "to the internal queue after {} seconds: {}"
            ).format(self.visibility_timeout, message_body)  # noqa
            logger.warning(warning)
            continue
        else:
            logger.debug(
                "Message successfully added to internal queue "
                "from SQS queue {} with body: {}".format(
                    self.queue_url, message_body))  # noqa
def test_worker_reads_messages_from_sqs():
    """
    Test simple worker reads from sqs queue
    """
    queue_url = _add_messages_to_sqs('tests.tasks.index_incrementer', 1)

    worker = SimpleProcessWorker(queue_url, INTERVAL, BATCHSIZE, parent_id=1)
    messages = worker.read_message()

    first_body = decode_message(messages[0])
    first_body.should.equal({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    })
def _create_pre_process_context(self, packed_message):
    """Flatten a packed SQS message into the pre-process context dict.

    Combines fields from the raw SQS message, its decoded body, and the
    packing metadata (queue url, fetch time, timeout) into one mapping.
    """
    message = packed_message['message']
    message_body = decode_message(message)
    full_task_path = message_body['task']
    return {
        "message_id": message['MessageId'],
        "task_name": full_task_path.split(".")[-1],
        "args": message_body['args'],
        "kwargs": message_body['kwargs'],
        "full_task_path": full_task_path,
        "fetch_time": packed_message['start_time'],
        "queue_url": packed_message['queue'],
        "timeout": packed_message['timeout'],
        "receipt_handle": message['ReceiptHandle'],
    }
def process_message(self):
    """Take one packed message off the internal queue and run its task.

    The SQS message is deleted only after the task completes without
    raising; a failing task leaves the message to be retried by SQS.
    """
    try:
        packed_message = self.internal_queue.get(timeout=0.5)
    except Empty:
        return
    message = packed_message['message']
    queue_id = packed_message['queue']
    message_body = decode_message(message)
    full_task_path = message_body['task']
    args = message_body['args']
    kwargs = message_body['kwargs']

    # Resolve "pkg.module.func" into a callable.
    dotted_parts = full_task_path.split(".")
    task_name = dotted_parts[-1]
    task_module = importlib.import_module(".".join(dotted_parts[:-1]))
    task = getattr(task_module, task_name)

    try:
        task(*args, **kwargs)
    except Exception:
        logger.exception(
            "Task {} raised error: with args: {} and kwargs: {}: {}".format(
                full_task_path,
                args,
                kwargs,
                traceback.format_exc(),
            )
        )
        return
    params = {'ReceiptHandle': message.receipt_handle}
    self.conn.get_status('DeleteMessage', params, queue_id)
    logger.info(
        "Processed task {} with args: {} and kwargs: {}".format(
            full_task_path,
            repr(args),
            repr(kwargs),
        )
    )
def test_worker_fills_internal_queue():
    """
    Test read workers fill internal queue
    """
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # The same task body is written to SQS and expected back out.
    task_body = {
        "task": "tests.tasks.index_incrementer",
        "args": [],
        "kwargs": {"message": "Test message"},
    }
    message = Message()
    message.set_body(json.dumps(task_body))
    queue.write(message)

    internal_queue = Queue()
    worker = ReadWorker(queue, internal_queue)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message["message"])
    found_message_body.should.equal(task_body)
def read_message(self):
    """Fetch one batch of SQS messages and enqueue them internally.

    Batches held past the visibility timeout are abandoned, since SQS
    has already re-exposed the remaining messages to other readers.
    """
    messages = self.sqs_queue.get_messages(
        MESSAGE_DOWNLOAD_BATCH_SIZE,
        wait_time_seconds=LONG_POLLING_INTERVAL,
    )
    logger.info("Successfully got {} messages from SQS queue {}".format(len(messages), self.sqs_queue.name))  # noqa
    batch_start = time.time()
    for message in messages:
        elapsed = time.time() - batch_start
        if int(elapsed) >= self.visibility_timeout:
            # Don't add any more messages since they have re-appeared in
            # the sqs queue; instead just reset and get fresh messages
            # from the sqs queue.
            logger.warning(
                "Clearing Local messages since we exceeded their visibility_timeout")
            break
        message_body = decode_message(message)
        packed_message = {"queue": self.sqs_queue.id, "message": message}
        try:
            self.internal_queue.put(packed_message, True, self.visibility_timeout)
        except Full:
            logger.warning("Timed out trying to add the following message to the internal queue after {} seconds: {}".format(self.visibility_timeout, message_body))  # noqa
            continue
        else:
            logger.debug("Message successfully added to internal queue from SQS queue {} with body: {}".format(self.sqs_queue.name, message_body))  # noqa
def process_message(self):
    """Pop one packed message off the internal queue and run its task.

    Returns:
        False when the internal queue was empty (nothing attempted);
        True in every other case -- the task ran (successfully or not),
        or the message was discarded for exceeding its visibility timeout.
    """
    try:
        packed_message = self.internal_queue.get(timeout=0.5)
    except Empty:
        # Return False if we did not attempt to process any messages
        return False
    message = packed_message['message']
    queue_url = packed_message['queue']
    fetch_time = packed_message['start_time']
    timeout = packed_message['timeout']
    message_body = decode_message(message)
    full_task_path = message_body['task']
    args = message_body['args']
    kwargs = message_body['kwargs']

    # Resolve "pkg.module.func" into a callable.
    task_name = full_task_path.split(".")[-1]
    task_path = ".".join(full_task_path.split(".")[:-1])
    task_module = importlib.import_module(task_path)
    task = getattr(task_module, task_name)

    current_time = time.time()
    if int(current_time - fetch_time) >= timeout:
        # SQS has already re-queued this message; running it now would
        # duplicate work done by another worker.
        logger.warning(
            "Discarding task {} with args: {} and kwargs: {} due to "
            "exceeding visibility timeout".format(  # noqa
                full_task_path,
                repr(args),
                repr(kwargs),
            )
        )
        return True
    try:
        # BUGFIX: time.clock() was deprecated since Python 3.3 and removed
        # in 3.8, and measured CPU time (not wall time) on Unix.  The log
        # message reports elapsed seconds, so wall-clock time.time() is
        # the correct measurement.
        start_time = time.time()
        task(*args, **kwargs)
    except Exception:
        end_time = time.time()
        logger.exception(
            "Task {} raised error in {:.4f} seconds: with args: {} "
            "and kwargs: {}: {}".format(
                full_task_path,
                end_time - start_time,
                args,
                kwargs,
                traceback.format_exc(),
            )
        )
        return True
    else:
        end_time = time.time()
        # Only delete from SQS after the task ran without raising.
        self.conn.delete_message(
            QueueUrl=queue_url,
            ReceiptHandle=message['ReceiptHandle']
        )
        logger.info(
            "Processed task {} in {:.4f} seconds with args: {} "
            "and kwargs: {}".format(
                full_task_path,
                end_time - start_time,
                repr(args),
                repr(kwargs),
            )
        )
        return True