def test_sqs_timeout(self):
    """Check SQS long-polling: blocks on an empty queue, returns early otherwise.

    Also verifies the queue's default ReceiveMessageWaitTimeSeconds is "0".
    """
    conn = SQSConnection()
    queue = conn.create_queue("test_sqs_timeout_%s" % int(time.time()))
    self.addCleanup(conn.delete_queue, queue, True)

    wait = 2

    # Empty queue: the read must block for the full long-poll interval.
    started = time.time()
    result = queue.read(visibility_timeout=None, wait_time_seconds=wait)
    elapsed = time.time() - started
    self.assertTrue(
        elapsed > wait,
        "SQS queue did not block for at least %s seconds: %s" % (wait, elapsed),
    )
    self.assertIsNone(result)

    # Now that there's an element in the queue, we should not block for 2
    # seconds.
    conn.send_message(queue, "test message")
    started = time.time()
    msg = conn.receive_message(
        queue,
        number_messages=1,
        visibility_timeout=None,
        attributes=None,
        wait_time_seconds=wait,
    )[0]
    elapsed = time.time() - started
    self.assertTrue(
        elapsed < wait,
        "SQS queue blocked longer than %s seconds: %s" % (wait, elapsed),
    )
    self.assertEqual(msg.get_body(), "test message")

    attrs = conn.get_queue_attributes(queue, "ReceiveMessageWaitTimeSeconds")
    self.assertEqual(attrs["ReceiveMessageWaitTimeSeconds"], "0")
def test_sqs_timeout(self):
    """Exercise SQS wait_time_seconds behaviour on an empty vs. non-empty queue."""
    connection = SQSConnection()
    queue_name = 'test_sqs_timeout_%s' % int(time.time())
    test_queue = connection.create_queue(queue_name)
    self.addCleanup(connection.delete_queue, test_queue, True)

    timeout_sec = 2

    # With nothing queued, the long poll should hold for the whole window.
    t0 = time.time()
    got = test_queue.read(visibility_timeout=None,
                          wait_time_seconds=timeout_sec)
    delta = time.time() - t0
    self.assertTrue(
        delta > timeout_sec,
        "SQS queue did not block for at least %s seconds: %s"
        % (timeout_sec, delta))
    self.assertIsNone(got)

    # Now that there's an element in the queue, we should not block for 2
    # seconds.
    connection.send_message(test_queue, 'test message')
    t0 = time.time()
    received = connection.receive_message(test_queue,
                                          number_messages=1,
                                          visibility_timeout=None,
                                          attributes=None,
                                          wait_time_seconds=timeout_sec)
    first = received[0]
    delta = time.time() - t0
    self.assertTrue(
        delta < timeout_sec,
        "SQS queue blocked longer than %s seconds: %s"
        % (timeout_sec, delta))
    self.assertEqual(first.get_body(), 'test message')

    queue_attrs = connection.get_queue_attributes(
        test_queue, 'ReceiveMessageWaitTimeSeconds')
    self.assertEqual(queue_attrs['ReceiveMessageWaitTimeSeconds'], '0')
def _process_message():
    """Pull at most one sitemap-import job off SQS and process it."""
    if not g.sitemap_sqs_queue:
        return

    sqs = SQSConnection()
    queue = sqs.get_queue(g.sitemap_sqs_queue)
    received = sqs.receive_message(queue, number_messages=1)
    if not received:
        return

    message, = received
    job = json.loads(message.get_body())
    s3path = parse_s3_path(job['location'])

    # There are some error cases that allow us to get messages
    # for sitemap creation that are now out of date.
    timestamp = job.get('timestamp')
    if timestamp is not None and _before_last_sitemap(timestamp):
        queue.delete_message(message)
        return

    g.log.info("Got import job %r", job)
    subreddits = find_all_subreddits(s3path)
    store_sitemaps_in_s3(subreddits)
    queue.delete_message(message)
def _recieve_sqs_message():
    """Yield one normalized import job pulled from the sitemap SQS queue.

    When the queue is empty this yields a bare ``None`` and stops —
    presumably so a wrapping consumer (e.g. a contextmanager-style caller)
    always receives exactly one value; confirm against the call site.
    """
    sqs = SQSConnection()
    queue = sqs.get_queue(g.sitemap_sqs_queue)
    received = sqs.receive_message(queue, number_messages=1)
    if not received:
        yield
        return

    message, = received
    job = json.loads(message.get_body())
    g.log.info('Received import job %r', job)
    yield _normalize_sqs_message(job)
    # Delete only after the consumer resumes us, i.e. the job was handled.
    queue.delete_message(message)
def _recieve_sqs_message():
    """Generator producing a single normalized sitemap job from SQS.

    Yields ``None`` when no message is available (bare ``yield`` before
    returning — presumably required by the consuming code; verify caller).
    """
    connection = SQSConnection()
    sitemap_queue = connection.get_queue(g.sitemap_sqs_queue)
    batch = connection.receive_message(sitemap_queue, number_messages=1)
    if not batch:
        yield
        return

    sqs_message, = batch
    payload = json.loads(sqs_message.get_body())
    g.log.info('Received import job %r', payload)
    yield _normalize_sqs_message(payload)
    # The message is removed only once control returns here, after the
    # yielded job has been consumed.
    sitemap_queue.delete_message(sqs_message)
def do_run(self):
    """Run the plugin session on a fresh EC2 instance, relaying SQS results.

    Creates a per-session SQS result queue with an anonymous SendMessage
    policy, boots an EC2 instance (which runs the plugin and reports back
    over the queue), polls the queue until the instance shuts itself down,
    then tears down both the queue and the instance.

    Side effects: creates/deletes AWS resources; logs progress; calls
    ``self.report_issues`` for each 'issues' message received.
    """
    queue = None
    reservation = None
    instance = None
    # BUG FIX: initialize the connections before the try block, otherwise a
    # failure inside EC2Connection()/SQSConnection() left them unbound and
    # the finally clause raised NameError, masking the original exception.
    ec2_conn = None
    sqs_conn = None
    try:
        cfg = self.configuration
        ec2_conn = EC2Connection(cfg['aws_access_key_id'],
                                 cfg['aws_secret_access_key'])
        #, region=get_region(cfg['ec2_region']))
        sqs_conn = SQSConnection(cfg['aws_access_key_id'],
                                 cfg['aws_secret_access_key'])

        # Create a queue for the results. Setup a policy that allows the EC2
        # instance to call SendMessage on the result queue without
        # authentication.
        queue_name = 'minion_plugin_service_session_' + self.session_id
        queue = sqs_conn.create_queue(queue_name)
        queue_url = "https://sqs.%s.amazonaws.com/%d/%s" % (
            cfg["ec2_region"], cfg["aws_account_id"], queue.name)
        logging.info("Queue url is " + queue_url)

        # Start an instance. Wait a few minutes for it to start up.
        user_data = dict((k, v) for k, v in cfg.iteritems()
                         if not k.startswith("aws_"))
        user_data['minion_results_queue_url'] = queue_url
        user_data['minion_plugin_session_id'] = self.session_id
        user_data['minion_plugin_name'] = cfg['minion_plugin_name']
        logging.debug("User data for instance is %s" % str(user_data))
        reservation = ec2_conn.run_instances(
            cfg["ec2_image_id"],
            user_data=json.dumps(user_data),
            instance_type=cfg["ec2_instance_type"],
            instance_initiated_shutdown_behavior="terminate",
            key_name=cfg['ec2_key_name'])
        instance = reservation.instances[0]

        # Set the queue policy to allow anonymous requests from the instance
        # just booted up.
        policy = {
            "Version": "2008-10-17",
            "Id": "MinionPolicy_" + self.session_id,
            "Statement": {
                "Sid": "MinionStatement_" + self.session_id,
                "Effect": "Allow",
                "Principal": {"AWS": "*"},
                "Action": "sqs:SendMessage",
                "Resource": "arn:aws:sqs:%s:%d:%s" % (
                    cfg['ec2_region'], cfg['aws_account_id'], queue_name),
                # TODO Find a proper fix for this. The queue name is
                # reasonably random I think but it would be nice to lock it
                # down to just the instance. (Can't do that until the
                # instance has booted though, which means we need to inform
                # the instance that it can run the plugin, blah)
                #"Condition": { "IpAddress": { "aws:SourceIp": "%s/32" % instance.ip_address } }
            }
        }
        sqs_conn.set_queue_attribute(queue, "Policy", json.dumps(policy))

        def process_queue_messages():
            # Drain currently-available result messages. Stops consuming and
            # returns True as soon as a 'finish' message is seen; forwards
            # 'issues' payloads to self.report_issues.
            for message in sqs_conn.receive_message(queue):
                sqs_conn.delete_message(queue, message)
                logging.info("Received message from instance: " +
                             str(message.get_body()))
                msg = json.loads(message.get_body())
                if msg.get('type') == 'finish':
                    #self.report_finish(exit_code=msg['data']['exit_code'])
                    return True
                if msg.get('type') == 'issues':
                    self.report_issues(msg['data'])
            return False

        # Wait for the instance to start
        logging.info("Waiting for instance to start up")
        expiration_time = time.time() + 120
        while time.time() < expiration_time:
            state = instance.update()
            if state == 'running':
                break
            time.sleep(5)
        state = instance.update()
        if state != 'running':
            raise Exception("Failed to start instance")

        # Now that the instance is running we wait until it shuts itself
        # down, forwarding queued messages as they arrive.
        logging.info("Polling the queue and waiting for the instance to stop")
        while True:
            process_queue_messages()
            # Check if the instance has been terminated
            state = instance.update()
            if state in ('stopped', 'terminated'):
                break
            time.sleep(5)

        # Final grab of messages from the queue
        process_queue_messages()

    except Exception:
        logging.exception(
            "Uncaught exception thrown while controlling EC2 instance")
    finally:
        logging.info("Deleting the queue")
        if sqs_conn and queue:
            try:
                sqs_conn.delete_queue(queue, force_deletion=True)
            except Exception:
                logging.exception("Failed to delete queue " + queue.name)
        logging.info("Deleting the instance")
        if ec2_conn and instance:
            try:
                instance.terminate()
            except Exception:
                logging.exception("Failed to terminate instance " +
                                  str(instance))
# Kick off a Glacier inventory-retrieval job, wait for its SNS-notified
# completion by polling SQS, then persist the job output as JSON.
glacier_layer1 = Layer1(aws_access_key_id=aws_access_key_id,
                        aws_secret_access_key=aws_secret_access_key,
                        region_name=aws_region_name)
glacier_layer1.describe_vault(vault_name)

log('Kick Glacier job.')
job_data = dict(Description='Retrieve archive info to empty vault.',
                Format='JSON',
                SNSTopic=sns_topic_arn,
                Type='inventory-retrieval')
job_info = glacier_layer1.initiate_job(vault_name, job_data)
job_id = job_info['JobId']
glacier_layer1.describe_job(vault_name, job_id)

log('Wait for Glacier job completion.')
confirmed = False
while not confirmed:
    log('Sleep for a while...')
    time.sleep(3600.0)  # 1 hour.
    log('Try to receive SQS message.')
    sqs_msg_list = sqs_con.receive_message(sqs_queue, wait_time_seconds=20)
    if sqs_msg_list:
        # A notification arrived; confirm our job is among the succeeded
        # completed jobs before declaring it done.
        log('SQS message received and Check Glacier job status.')
        job_status_list = glacier_layer1.list_jobs(vault_name,
                                                   completed=True,
                                                   status_code='Succeeded')
        for job_status in job_status_list['JobList']:
            if job_status['JobId'] == job_id:
                log('Glacier job completion confirmed.')
                confirmed = True
                break

log('Get Glacier job output.')
job_output = glacier_layer1.get_job_output(vault_name, job_id)
# BUG FIX: the original rebound ``job_output_file`` (the output *path*) to
# the open file object and would leak the handle on a write error; use a
# context manager and leave the path variable untouched.
with open(job_output_file, 'w') as output_fp:
    output_fp.write(json.dumps(job_output))