class awssqs:
    """Thin wrapper around a boto SQS queue in eu-west-1."""

    def __init__(self, name, visibility_timeout=60):
        """Create (or attach to) the named queue.

        Raises Exception if the queue could not be created/fetched.
        """
        self.visibility_timeout = visibility_timeout
        # NOTE(review): assumes boto.sqs.regions()[1] is eu-west-1; the
        # ordering of regions() is not guaranteed across boto versions.
        self.conn = SQSConnection(region=boto.sqs.regions()[1])  # eu-west1
        self.q = self.conn.create_queue(name)
        if self.q is None:
            raise Exception("Could not get that queue " + name)
        self.name = name

    def write(self, message):
        """Enqueue a message body, retrying every 5s; give up after 10 retries."""
        if self.q is None:
            raise Exception("Queue is none " + self.name)
        m = Message()
        m.set_body(message)
        success = self.q.write(m)
        failed = 0
        while not success:
            time.sleep(5)
            success = self.q.write(m)  # Keep trying until success
            failed += 1
            if failed > 10:
                raise Exception("Failed over 10 times to write to queue %s!" % self.name)

    # Return a Message, use m.get_body() to get the text
    def read(self):
        """Read one message (or None) using this queue's visibility timeout."""
        if self.q is None:
            raise Exception("Queue is none " + self.name)
        rs = self.q.get_messages(visibility_timeout=self.visibility_timeout)
        if len(rs) > 0:
            m = rs[0]
            return m
        return None

    def length(self):
        """Return the approximate number of messages in the queue.

        Bug fix: the old implementation called get_messages(), which
        returns at most one message by default, so it could only ever
        report 0 or 1 -- and it also hid that message for
        visibility_timeout seconds as a side effect.  Queue.count()
        queries the ApproximateNumberOfMessages attribute instead.
        """
        if self.q is None:
            raise Exception("Queue is none " + self.name)
        return self.q.count()

    def delete(self, m):
        """Delete a previously-read Message from the queue."""
        self.q.delete_message(m)

    def deleteQueue(self):
        """Delete the queue itself (even if non-empty) and close the connection."""
        self.conn.delete_queue(self.q, force_deletion=True)
        self.q = None
        self.conn.close()
class awssqs:
    """Wrapper around a boto SQS queue pinned to eu-west-1."""

    def __init__(self, name, visibility_timeout=60):
        """Attach to (creating if needed) the queue called *name*."""
        self.visibility_timeout = visibility_timeout
        # NOTE(review): relies on boto.sqs.regions()[1] being eu-west-1;
        # regions() ordering is not a documented contract.
        self.conn = SQSConnection(region=boto.sqs.regions()[1])  # eu-west1
        self.q = self.conn.create_queue(name)
        if self.q is None:
            raise Exception("Could not get that queue " + name)
        self.name = name

    def write(self, message):
        """Write *message* to the queue, retrying up to 10 extra times."""
        if self.q is None:
            raise Exception("Queue is none " + self.name)
        m = Message()
        m.set_body(message)
        success = self.q.write(m)
        failed = 0
        while not success:
            time.sleep(5)
            success = self.q.write(m)  # Keep trying until success
            failed += 1
            if failed > 10:
                raise Exception("Failed over 10 times to write to queue %s!" % self.name)

    # Return a Message, use m.get_body() to get the text
    def read(self):
        """Fetch a single message, or None when the queue looks empty."""
        if self.q is None:
            raise Exception("Queue is none " + self.name)
        rs = self.q.get_messages(visibility_timeout=self.visibility_timeout)
        if len(rs) > 0:
            m = rs[0]
            return m
        return None

    def length(self):
        """Approximate queue depth.

        Bug fix: previously this used get_messages(), which fetches at
        most one message by default -- the result was always 0 or 1 and
        the fetched message was hidden for visibility_timeout seconds.
        Queue.count() reports ApproximateNumberOfMessages without
        consuming anything.
        """
        if self.q is None:
            raise Exception("Queue is none " + self.name)
        return self.q.count()

    def delete(self, m):
        """Remove a received Message from the queue."""
        self.q.delete_message(m)

    def deleteQueue(self):
        """Force-delete the queue and close the underlying connection."""
        self.conn.delete_queue(self.q, force_deletion=True)
        self.q = None
        self.conn.close()
def test_queue_deletion_affects_full_queues(self):
    """SQS deletes a non-empty queue without requiring ``force_deletion``."""
    conn = SQSConnection()
    queues_before = len(conn.get_all_queues())
    empty = conn.create_queue("empty%d" % int(time.time()))
    full = conn.create_queue("full%d" % int(time.time()))
    # Give SQS time to register both new queues.
    time.sleep(60)
    self.assertEqual(len(conn.get_all_queues()), queues_before + 2)
    # Drop a single message into the "full" queue.
    msg = Message()
    msg.set_body("This is a test message.")
    full.write(msg)
    self.assertEqual(full.count(), 1)
    self.assertTrue(conn.delete_queue(empty))
    # Regression check for the docs: SQS will delete a queue with
    # messages in it, no ``force_deletion`` needed.
    self.assertTrue(conn.delete_queue(full))
    # Wait long enough for SQS to finally remove the queues.
    time.sleep(90)
    self.assertEqual(len(conn.get_all_queues()), queues_before)
def test_queue_deletion_affects_full_queues(self):
    """Verify ``delete_queue`` succeeds on a queue that still holds a message."""
    conn = SQSConnection()
    initial_count = len(conn.get_all_queues())
    # Two fresh, uniquely-named queues: one left empty, one populated.
    empty_q = conn.create_queue('empty%d' % int(time.time()))
    full_q = conn.create_queue('full%d' % int(time.time()))
    time.sleep(60)
    # Both should now be visible.
    self.assertEqual(len(conn.get_all_queues()), initial_count + 2)
    # Populate the second queue with one message.
    message = Message()
    message.set_body('This is a test message.')
    full_q.write(message)
    self.assertEqual(full_q.count(), 1)
    self.assertTrue(conn.delete_queue(empty_q))
    # Here's the regression for the docs. SQS will delete a queue with
    # messages in it, no ``force_deletion`` needed.
    self.assertTrue(conn.delete_queue(full_q))
    # SQS removes queues asynchronously; give it a while before recounting.
    time.sleep(90)
    self.assertEqual(len(conn.get_all_queues()), initial_count)
class SQS(object): def __init__(self, config="config.ini"): if isinstance(config, basestring): config = credentials.ConfigFileCredentials(config) elif not isinstance(config, credentials.Credentials): raise TypeError("Unsupported config parameter type") aws_access_key_id, aws_secret_access_key, aws_queue = config.get_data() try: self.conn = SQSConnection(aws_access_key_id, aws_secret_access_key) self.set_queue(aws_queue) except: print 'Error connection' def get_all_queues(self): return self.conn.get_all_queues() def get_queue_attributes(self): return self.conn.get_queue_attributes(self.queue, attribute='All') def create_queue(self, queue, timeout): return self.conn.create_queue(queue, timeout) def set_queue(self, queue): self.queue = self.conn.get_queue(queue) return True def get_messages(self, limit=10): return self.queue.get_messages(limit) def count(self): #print "Count: %s" % self.queue.count() return self.queue.count() def write(self, data): m = Message() m.set_body(json.dumps(data)) return self.queue.write(m) def delete(self, id): #print "Eliminando %s" % id self.queue.delete_message(id) def clear(self): return self.queue.clear() def delete_queue(self): return self.conn.delete_queue(self.queue)
def test_1_basic(self): print '--- running SQSConnection tests ---' c = SQSConnection() rs = c.get_all_queues() num_queues = 0 for q in rs: num_queues += 1 # try illegal name try: queue = c.create_queue('bad_queue_name') except SQSError: pass # now create one that should work and should be unique (i.e. a new one) queue_name = 'test%d' % int(time.time()) timeout = 60 queue = c.create_queue(queue_name, timeout) time.sleep(30) rs = c.get_all_queues() i = 0 for q in rs: i += 1 assert i == num_queues+1 assert queue.count_slow() == 0 # check the visibility timeout t = queue.get_timeout() assert t == timeout, '%d != %d' % (t, timeout) # now try to get queue attributes a = q.get_attributes() assert a.has_key('ApproximateNumberOfMessages') assert a.has_key('VisibilityTimeout') a = q.get_attributes('ApproximateNumberOfMessages') assert a.has_key('ApproximateNumberOfMessages') assert not a.has_key('VisibilityTimeout') a = q.get_attributes('VisibilityTimeout') assert not a.has_key('ApproximateNumberOfMessages') assert a.has_key('VisibilityTimeout') # now change the visibility timeout timeout = 45 queue.set_timeout(timeout) time.sleep(30) t = queue.get_timeout() assert t == timeout, '%d != %d' % (t, timeout) # now add a message message_body = 'This is a test\n' message = queue.new_message(message_body) queue.write(message) time.sleep(30) assert queue.count_slow() == 1 time.sleep(30) # now read the message from the queue with a 10 second timeout message = queue.read(visibility_timeout=10) assert message assert message.get_body() == message_body # now immediately try another read, shouldn't find anything message = queue.read() assert message == None # now wait 10 seconds and try again time.sleep(10) message = queue.read() assert message if c.APIVersion == '2007-05-01': # now terminate the visibility timeout for this message message.change_visibility(0) # now see if we can read it in the queue message = queue.read() assert message # now delete the message 
queue.delete_message(message) time.sleep(30) assert queue.count_slow() == 0 # create another queue so we can test force deletion # we will also test MHMessage with this queue queue_name = 'test%d' % int(time.time()) timeout = 60 queue = c.create_queue(queue_name, timeout) queue.set_message_class(MHMessage) time.sleep(30) # now add a couple of messages message = queue.new_message() message['foo'] = 'bar' queue.write(message) message_body = {'fie' : 'baz', 'foo' : 'bar'} message = queue.new_message(body=message_body) queue.write(message) time.sleep(30) m = queue.read() assert m['foo'] == 'bar' # now delete that queue and messages c.delete_queue(queue, True) print '--- tests completed ---'
def test_1_basic(self):
    """End-to-end SQS smoke test: queue CRUD, attributes, single-message
    round-trip, and batch write/delete.  Heavily sleep-based because SQS
    attribute updates and queue listings are eventually consistent."""
    print '--- running SQSConnection tests ---'
    c = SQSConnection()
    rs = c.get_all_queues()
    num_queues = 0
    for q in rs:
        num_queues += 1
    # try illegal name ('*' is not allowed in SQS queue names)
    try:
        queue = c.create_queue('bad*queue*name')
        self.fail('queue name should have been bad')
    except SQSError:
        pass
    # now create one that should work and should be unique (i.e. a new one)
    queue_name = 'test%d' % int(time.time())
    timeout = 60
    queue = c.create_queue(queue_name, timeout)
    time.sleep(60)
    rs = c.get_all_queues()
    i = 0
    for q in rs:
        i += 1
    assert i == num_queues+1
    assert queue.count_slow() == 0
    # check the visibility timeout
    t = queue.get_timeout()
    assert t == timeout, '%d != %d' % (t, timeout)
    # now try to get queue attributes
    # NOTE(review): uses `q` (last queue from the listing loop above), not
    # `queue` -- presumably any queue suffices for the attribute checks.
    a = q.get_attributes()
    assert a.has_key('ApproximateNumberOfMessages')
    assert a.has_key('VisibilityTimeout')
    a = q.get_attributes('ApproximateNumberOfMessages')
    assert a.has_key('ApproximateNumberOfMessages')
    assert not a.has_key('VisibilityTimeout')
    a = q.get_attributes('VisibilityTimeout')
    assert not a.has_key('ApproximateNumberOfMessages')
    assert a.has_key('VisibilityTimeout')
    # now change the visibility timeout
    timeout = 45
    queue.set_timeout(timeout)
    time.sleep(60)
    t = queue.get_timeout()
    assert t == timeout, '%d != %d' % (t, timeout)
    # now add a message
    message_body = 'This is a test\n'
    message = queue.new_message(message_body)
    queue.write(message)
    time.sleep(60)
    assert queue.count_slow() == 1
    time.sleep(90)
    # now read the message from the queue with a 10 second timeout
    message = queue.read(visibility_timeout=10)
    assert message
    assert message.get_body() == message_body
    # now immediately try another read, shouldn't find anything
    # (the message is hidden for the 10-second visibility timeout)
    message = queue.read()
    assert message == None
    # now wait 30 seconds and try again
    time.sleep(30)
    message = queue.read()
    assert message
    # now delete the message
    queue.delete_message(message)
    time.sleep(30)
    assert queue.count_slow() == 0
    # try a batch write: (id, body, delay_seconds) triples
    num_msgs = 10
    msgs = [(i, 'This is message %d' % i, 0) for i in range(num_msgs)]
    queue.write_batch(msgs)
    # try to delete all of the messages using batch delete
    deleted = 0
    while deleted < num_msgs:
        time.sleep(5)
        msgs = queue.get_messages(num_msgs)
        if msgs:
            br = queue.delete_message_batch(msgs)
            deleted += len(br.results)
    # create another queue so we can test force deletion
    # we will also test MHMessage with this queue
    queue_name = 'test%d' % int(time.time())
    timeout = 60
    queue = c.create_queue(queue_name, timeout)
    queue.set_message_class(MHMessage)
    time.sleep(30)
    # now add a couple of messages
    message = queue.new_message()
    message['foo'] = 'bar'
    queue.write(message)
    message_body = {'fie' : 'baz', 'foo' : 'bar'}
    message = queue.new_message(body=message_body)
    queue.write(message)
    time.sleep(30)
    m = queue.read()
    assert m['foo'] == 'bar'
    # now delete that queue and messages (True == force deletion)
    c.delete_queue(queue, True)
    print '--- tests completed ---'
class sqsUtil(object):
    '''Methods to use aws sqs'''

    def __init__(self, aws_access_key, aws_secret_key, region=None):
        '''Default constructor.
        Inputs:
            aws_access_key = access key provided by aws
            aws_secret_key = secret key associated with access key
            region = optional RegionInfo object to connect to
        '''
        self.__access_key = aws_access_key
        self.__secret_key = aws_secret_key
        self.__region = region
        self.__conn = None

    def open(self):
        '''Creates a new sqs connection with given access and secret key.'''
        # Bug fix: the stored region was previously ignored, so every
        # connection went to the default region regardless of the
        # constructor argument.  Passing None preserves the old default.
        self.__conn = SQSConnection(aws_access_key_id=self.__access_key,
                                    aws_secret_access_key=self.__secret_key,
                                    region=self.__region)

    def createQueue(self, name, visibilityTimeout=30):
        '''Creates a new queue.
        Input:
            name = name to associate with queue
            visibilityTimeout = how long, in seconds, to hide a message
                                before it can be read again.
        Returns: True if queue was successfully created.
        '''
        if not self.__checkConn():
            return False
        self.__conn.create_queue(name, visibilityTimeout)
        return True

    def getVisibilityTimeout(self, name):
        '''Get visibility time out of a queue.
        Inputs:
            name = name of queue
        Returns: the queue's visibility timeout in seconds, None if the
            queue does not exist, or False if the connection is not open.
        '''
        # Bug fix: this method previously checked the connection and then
        # fell through, implicitly returning None in every case.
        if not self.__checkConn():
            return False
        q = self.getQueue(name)
        if q is None:
            return None
        return q.get_timeout()

    def getQueue(self, name):
        '''Get a queue.
        Inputs:
            name = name of queue
        Returns: A queue object or None if queue object not found
        '''
        if not self.__checkConn():
            return None
        return self.__conn.get_queue(name)

    def getAllQueues(self, prefix=""):
        '''Get a list of all queues created.
        Inputs:
            prefix = restricts return to names that start with prefix
        Returns: A ResultSet object of queues.  Calling id on one of these
            queues will get the id of the queue.
        '''
        if not self.__checkConn():
            return None
        return self.__conn.get_all_queues(prefix)

    def writeToQueue(self, name, message, messageClass=None):
        '''Write a message to a queue.
        Inputs:
            name = name of queue to write to
            message = message to write into queue
            messageClass = a custom message class to use to write into queue
        Returns: False if message was not written, true otherwise
        '''
        if not self.__checkConn():
            return False
        q = self.getQueue(name)
        if q is None:
            return False
        if messageClass is not None:
            q.set_message_class(messageClass)
        else:
            messageClass = Message
        m = messageClass()
        m.set_body(message)
        q.write(m)
        return True

    def readFromQueue(self, name, num=1, visibilityTimeout=None, delete=False):
        '''Read a message from a queue.
        Inputs:
            name = name of queue to read from
            num = number of messages to read from queue
            visibilityTimeout = setting this will change the time until the
                next reader can read the messages received
            delete = true will delete all messages read from the queue
        Returns: ResultSet object of messages read.  Calling get_body() on
            one of these messages will return the message it contains.
            Returns None if queue was not successfully read.
        '''
        if not self.__checkConn():
            return None
        q = self.getQueue(name)
        if q is None:
            return None
        if visibilityTimeout:
            results = q.get_messages(num_messages=num,
                                     visibility_timeout=visibilityTimeout)
        else:
            results = q.get_messages(num_messages=num)
        if delete:
            for m in results:
                q.delete_message(m)
        return results

    def readFromQueueToFile(self, name, fileName, separator="\n----------\n"):
        '''Dump all the messages from a queue into a local file.
        Inputs:
            name = name of queue
            fileName = name of file to dump to
            separator = what to place between messages in the file
        Returns: True if queue was successfully dumped.
        '''
        if not self.__checkConn():
            return False
        q = self.getQueue(name)
        if q is None:
            return False
        # boto's dump method really is named with a trailing underscore
        q.dump_(fileName, sep=separator)
        return True

    def deleteQueue(self, name, clear=False):
        '''Delete a queue.
        Inputs:
            name = name of queue
            clear = if True, will clear the queue before deleting it.
                This method will not delete the queue if it is not empty.
        Returns: True if queue was successfully deleted.  A queue will not
            be deleted if it still contains messages.
        '''
        if not self.__checkConn():
            return False
        q = self.getQueue(name)
        if q is None:
            return False
        if clear:
            q.clear()
        return self.__conn.delete_queue(q)

    def __checkConn(self):
        '''Helper function to make sure SQS connection is open.'''
        if not self.__conn:
            print("[ERROR] must call open() before using this function.")
            return False
        return True
def test_1_basic(self):
    """Early SQS smoke test: queue create/list, attribute round-trip, one
    message write/read/delete, then queue deletion.  Sleeps account for
    SQS eventual consistency."""
    print '--- running SQSConnection tests ---'
    c = SQSConnection()
    rs = c.get_all_queues()
    num_queues = 0
    for q in rs:
        num_queues += 1
    # try illegal name
    # NOTE(review): 'bad_queue_name' is actually a legal SQS name
    # (underscores are allowed), so no SQSError is ever raised and this
    # check tests nothing -- later revisions of this test use
    # 'bad*queue*name' plus self.fail().  TODO confirm and align.
    try:
        queue = c.create_queue('bad_queue_name')
    except SQSError:
        pass
    # now create one that should work and should be unique (i.e. a new one)
    queue_name = 'test%d' % int(time.time())
    timeout = 60
    queue = c.create_queue(queue_name, timeout)
    time.sleep(10)
    rs = c.get_all_queues()
    i = 0
    for q in rs:
        i += 1
    assert i == num_queues+1
    assert queue.count_slow() == 0
    # check the visibility timeout
    t = queue.get_timeout()
    assert t == timeout, '%d != %d' % (t, timeout)
    # now try to get queue attributes
    # NOTE(review): operates on `q` (last queue listed), not `queue`.
    a = q.get_attributes()
    assert a.has_key('ApproximateNumberOfMessages')
    assert a.has_key('VisibilityTimeout')
    a = q.get_attributes('ApproximateNumberOfMessages')
    assert a.has_key('ApproximateNumberOfMessages')
    assert not a.has_key('VisibilityTimeout')
    a = q.get_attributes('VisibilityTimeout')
    assert not a.has_key('ApproximateNumberOfMessages')
    assert a.has_key('VisibilityTimeout')
    # now change the visibility timeout
    timeout = 45
    queue.set_timeout(timeout)
    t = queue.get_timeout()
    assert t == timeout, '%d != %d' % (t, timeout)
    # now add a message
    message_body = 'This is a test\n'
    message = queue.new_message(message_body)
    queue.write(message)
    time.sleep(5)
    assert queue.count_slow() == 1
    time.sleep(10)
    # now read the message from the queue with a 10 second timeout
    message = queue.read(visibility_timeout=10)
    assert message
    assert message.get_body() == message_body
    # now immediately try another read, shouldn't find anything
    message = queue.read()
    assert message == None
    # now wait 10 seconds and try again
    time.sleep(10)
    message = queue.read()
    assert message
    # now delete the message
    queue.delete_message(message)
    time.sleep(5)
    assert queue.count_slow() == 0
    # now delete that queue
    c.delete_queue(queue)
    rs = c.get_all_queues()
    i = 0
    for q in rs:
        i += 1
    assert i == num_queues
    print '--- tests completed ---'
# this script expects 2 environment variables # 1. SQS_KEY_ID (preferably an IAM user with limited rights) # 2. SQS_SECRET_KEY (accompanying secret key) # 3. SQS_TASK_QUEUE (the queue to use) import os from boto.sqs.connection import SQSConnection from boto.sqs.message import Message import utils # your amazon keys key = os.environ['SQS_KEY_ID'] access = os.environ['SQS_ACCESS_KEY'] queue = os.environ['SQS_TASK_QUEUE'] if __name__ == '__main__': region_info = utils.get_region_info() sqs = SQSConnection(key, access, region=region_info) tasks = sqs.create_queue(queue) m = tasks.read() while m != None: print( m.get_body()) tasks.delete_message(m) m = tasks.read() sqs.delete_queue(tasks)
class Docsplitter: def __init__(self): self.ec2conn = EC2Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) self.sqsconn = SQSConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) self.q = self.sqsconn.create_queue('todo') self.conn_s3 = boto.connect_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY) try: self.bucket = self.conn_s3.create_bucket(AWS_BUCKET) except: self.bucket = self.conn_s3.get_bucket(AWS_BUCKET) def destroy_ec2(self): """kill any existing instances -- only call this if you're not using EC2 for unrelated purposes!!!!""" ins = self.ec2conn.get_all_spot_instance_requests() if len(ins): self.ec2conn.cancel_spot_instance_requests([x.id for x in ins]) all = self.ec2conn.get_all_instances() if len(all): self.ec2conn.terminate_instances([x.instances[0].id for x in all]) print "shutting down %s old instances" % len(all) def destroy_queue(self): """clear the SQS queue of files to process""" self.sqsconn.delete_queue(self.q, force_deletion=True) time.sleep(63) #amazon makes us wait a minute after deleting old queue self.q = self.sqsconn.create_queue('todo') def add_to_queue(self,tuples): """Make a queue of files to process, given urlpairs, which is a series of two-tuples containing a primary key for an object and a URL of a PDF.""" for t in tuples: string = "%s,%s" % (t[0],t[1]) m = Message() m.set_body(string) status = self.q.write(m) print "added %s to list" % len(tuples) def start(self,docs_per_hour): """how many computers should we spin up to process the just-generated list? how many docs can one process per hour? 
this uses medium instances, dual-core, and runs two processes at once.""" instances = int(math.ceil(self.q.count() / docs_per_hour)) if instances>AWS_MAX_INSTANCES: instances=AWS_MAX_INSTANCES k=Key(self.bucket,'s3.py') k.set_contents_from_filename('s3.py', policy='private') k=Key(self.bucket,'aws.py') k.set_contents_from_filename('aws.py', policy='private') script = """#!/bin/bash cd /dev/shm python -c "import boto from boto.s3.key import Key AWS_ACCESS_KEY_ID = '%s' AWS_SECRET_ACCESS_KEY = '%s' conn_s3 = boto.connect_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY) bucket = conn_s3.get_bucket('%s') k = bucket.get_key('s3.py') py = k.get_contents_as_string() fout = open('s3.py','w') fout.write(py) fout.close() k = bucket.get_key('aws.py') py = k.get_contents_as_string() fout = open('aws.py','w') fout.write(py) fout.close()" python s3.py & p=$! python s3.py while kill -0 "$p"; do sleep 5 done sudo shutdown -h now """ % (AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,AWS_BUCKET) print "spinning up %s instances for %s docs" % (instances,self.q.count()) spot = self.ec2conn.request_spot_instances(AWS_PRICE_PER_HOUR, ami_id, count=instances, type='one-time', key_name=AWS_KEY_PAIR_NAME, user_data=script, instance_type=AWS_INSTANCE_TYPE) def retrieve(self): """get an iterator of tuples with (id,OCR'd text)""" keys = self.bucket.list() for key in keys: filename = key.name if filename.startswith('output/'): id = filename[len('output/'):] k = Key(self.bucket, filename) txt = k.get_contents_as_string() yield (id,txt) def showprogress(self): """check in every 5 minutes until everything's done""" remaining = self.q.count() while remaining>0: print "%s docs remaining" % remaining time.sleep(5*60) remaining = self.q.count() def delete_output(self): """remove the OCR'd text files from the Amazon S3 bucket after you no longer need them""" keys = self.bucket.list() for key in keys: filename = key.name if filename.startswith('output/'): k = Key(self.bucket, filename) self.bucket.delete_key(k)
# To use this tool, the environment must have AWS_ACCESS_KEY_ID set to the # AWS access key and AWS_SECRET_ACCESS_KEY set to the secret access key. from boto.sqs.connection import SQSConnection from boto.sqs.message import Message from subprocess import * from boto.exception import * import time import pickle conn = SQSConnection() queues = conn.get_all_queues() for queue in queues: print queue.id # print queue.get_attributes() m = queue.read(10) while m != None: try: print pickle.loads(m.get_body()).class_ad except: m = queue.read(10) continue print queue.delete_message(m) m = queue.read(10) try: conn.delete_queue(queue) except BotoServerError, error: print 'Unable to delete SQS queue %s: %s, %s' % (queue.id, error.reason, error.body) pass
def test_1_basic(self):
    """SQS smoke test (pre-batch-API revision): queue CRUD, attributes,
    one message round-trip, change_visibility on the 2007-05-01 API, and
    force deletion with MHMessage.  Sleep-heavy due to SQS eventual
    consistency."""
    print '--- running SQSConnection tests ---'
    c = SQSConnection()
    rs = c.get_all_queues()
    num_queues = 0
    for q in rs:
        num_queues += 1
    # try illegal name
    # NOTE(review): 'bad_queue_name' is a legal SQS name (underscores
    # allowed), so this never raises and the check is a no-op; a later
    # revision in this file uses 'bad*queue*name' + self.fail().
    try:
        queue = c.create_queue('bad_queue_name')
    except SQSError:
        pass
    # now create one that should work and should be unique (i.e. a new one)
    queue_name = 'test%d' % int(time.time())
    timeout = 60
    queue = c.create_queue(queue_name, timeout)
    time.sleep(60)
    rs = c.get_all_queues()
    i = 0
    for q in rs:
        i += 1
    assert i == num_queues + 1
    assert queue.count_slow() == 0
    # check the visibility timeout
    t = queue.get_timeout()
    assert t == timeout, '%d != %d' % (t, timeout)
    # now try to get queue attributes
    # NOTE(review): uses `q` (last listed queue), not `queue`.
    a = q.get_attributes()
    assert a.has_key('ApproximateNumberOfMessages')
    assert a.has_key('VisibilityTimeout')
    a = q.get_attributes('ApproximateNumberOfMessages')
    assert a.has_key('ApproximateNumberOfMessages')
    assert not a.has_key('VisibilityTimeout')
    a = q.get_attributes('VisibilityTimeout')
    assert not a.has_key('ApproximateNumberOfMessages')
    assert a.has_key('VisibilityTimeout')
    # now change the visibility timeout
    timeout = 45
    queue.set_timeout(timeout)
    time.sleep(60)
    t = queue.get_timeout()
    assert t == timeout, '%d != %d' % (t, timeout)
    # now add a message
    message_body = 'This is a test\n'
    message = queue.new_message(message_body)
    queue.write(message)
    time.sleep(30)
    assert queue.count_slow() == 1
    time.sleep(30)
    # now read the message from the queue with a 10 second timeout
    message = queue.read(visibility_timeout=10)
    assert message
    assert message.get_body() == message_body
    # now immediately try another read, shouldn't find anything
    message = queue.read()
    assert message == None
    # now wait 30 seconds and try again
    time.sleep(30)
    message = queue.read()
    assert message
    if c.APIVersion == '2007-05-01':
        # now terminate the visibility timeout for this message
        message.change_visibility(0)
        # now see if we can read it in the queue
        message = queue.read()
        assert message
    # now delete the message
    queue.delete_message(message)
    time.sleep(30)
    assert queue.count_slow() == 0
    # create another queue so we can test force deletion
    # we will also test MHMessage with this queue
    queue_name = 'test%d' % int(time.time())
    timeout = 60
    queue = c.create_queue(queue_name, timeout)
    queue.set_message_class(MHMessage)
    time.sleep(30)
    # now add a couple of messages
    message = queue.new_message()
    message['foo'] = 'bar'
    queue.write(message)
    message_body = {'fie': 'baz', 'foo': 'bar'}
    message = queue.new_message(body=message_body)
    queue.write(message)
    time.sleep(30)
    m = queue.read()
    assert m['foo'] == 'bar'
    # now delete that queue and messages (True == force deletion)
    c.delete_queue(queue, True)
    print '--- tests completed ---'
def do_run(self):
    """Run one Minion plugin session on a fresh EC2 instance.

    Creates a per-session SQS result queue, boots an instance whose
    user-data tells it where to report, relays 'issues'/'finish' messages
    from the queue, and tears everything down in the ``finally`` block.
    """
    queue = None
    reservation = None
    instance = None
    try:
        cfg = self.configuration
        ec2_conn = EC2Connection(cfg['aws_access_key_id'], cfg['aws_secret_access_key'])  #, region=get_region(cfg['ec2_region']))
        sqs_conn = SQSConnection(cfg['aws_access_key_id'], cfg['aws_secret_access_key'])
        # Create a queue for the results. Setup a policy that allows the EC2 instance
        # to call SendMessage on the result queue without authentication.
        queue_name = 'minion_plugin_service_session_' + self.session_id
        queue = sqs_conn.create_queue(queue_name)
        queue_url = "https://sqs.%s.amazonaws.com/%d/%s" % (cfg["ec2_region"], cfg["aws_account_id"], queue.name)
        logging.info("Queue url is " + queue_url)
        # Start an instance. Wait a few minutes for it to start up.
        # Everything except the aws_* secrets is forwarded as user data.
        user_data = dict((k,v) for k,v in cfg.iteritems() if not k.startswith("aws_"))
        user_data['minion_results_queue_url'] = queue_url
        user_data['minion_plugin_session_id'] = self.session_id
        user_data['minion_plugin_name'] = cfg['minion_plugin_name']
        logging.debug("User data for instance is %s" % str(user_data))
        reservation = ec2_conn.run_instances(cfg["ec2_image_id"],
                                             user_data=json.dumps(user_data),
                                             instance_type=cfg["ec2_instance_type"],
                                             instance_initiated_shutdown_behavior="terminate",
                                             key_name=cfg['ec2_key_name'])
        instance = reservation.instances[0]
        # Set the queue policy to allow anonymous requests from the instance just booted up
        policy = {
            "Version": "2008-10-17",
            "Id": "MinionPolicy_" + self.session_id,
            "Statement": {
                "Sid": "MinionStatement_" + self.session_id,
                "Effect": "Allow",
                "Principal": { "AWS": "*" },
                "Action": "sqs:SendMessage",
                "Resource": "arn:aws:sqs:%s:%d:%s" % (cfg['ec2_region'], cfg['aws_account_id'], queue_name),
                # TODO Find a proper fix for this. The queue name is reasonably random I think but it would
                # be nice to lock it down to just the instance. (Can't do that until the instance has booted
                # though, which means we need to inform the instance that it can run the plugin, blah)
                #"Condition": { "IpAddress": { "aws:SourceIp": "%s/32" % instance.ip_address } }
            }
        }
        sqs_conn.set_queue_attribute(queue, "Policy", json.dumps(policy))
        # Wait for the instance to start (up to ~2 minutes, polling every 5s)
        logging.info("Waiting for instance to start up")
        expiration_time = time.time() + 120
        while time.time() < expiration_time:
            state = instance.update()
            if state == 'running':
                break
            time.sleep(5)
        state = instance.update()
        if state != 'running':
            raise Exception("Failed to start instance")
        # Now that the instance is running we wait until it shuts itself down
        logging.info("Polling the queue and waiting for the instance to stop")
        while True:
            # Grab messages from the queue
            for message in sqs_conn.receive_message(queue):
                sqs_conn.delete_message(queue, message)
                logging.info("Received message from instance: " + str(message.get_body()))
                msg = json.loads(message.get_body())
                if msg.get('type') == 'finish':
                    #self.report_finish(exit_code=msg['data']['exit_code'])
                    # NOTE(review): this break only exits the inner for
                    # loop; the outer while keeps polling until the
                    # instance reaches a stopped/terminated state below.
                    break
                if msg.get('type') == 'issues':
                    self.report_issues(msg['data'])
            # Check if the instance has been terminated
            state = instance.update()
            if state in ('stopped', 'terminated'):
                break
            time.sleep(5)
        # Final grab of messages from the queue
        for message in sqs_conn.receive_message(queue):
            sqs_conn.delete_message(queue, message)
            logging.info("Received message from instance: " + str(message.get_body()))
            msg = json.loads(message.get_body())
            if msg.get('type') == 'finish':
                #self.report_finish(exit_code=msg['data']['exit_code'])
                break
            if msg.get('type') == 'issues':
                self.report_issues(msg['data'])
    except Exception as e:
        logging.exception("Uncaught exception thrown while controlling EC2 instance")
    finally:
        # NOTE(review): if an exception is raised before sqs_conn/ec2_conn
        # are assigned, these references raise NameError here -- verify
        # and consider initializing them to None alongside queue/instance.
        logging.info("Deleting the queue")
        if sqs_conn and queue:
            try:
                sqs_conn.delete_queue(queue, force_deletion=True)
            except Exception as e:
                logging.exception("Failed to delete queue " + queue.name)
        logging.info("Deleting the instance")
        if ec2_conn and instance:
            try:
                instance.terminate()
            except Exception as e:
                logging.exception("Failed to terminate instance " + str(instance))
# NOTE(review): this fragment continues a larger script -- glacier_layer1,
# vault_name, job_id, job_output_file, sqs_con, sqs_queue, log(), and the
# aws_* credential variables are all defined earlier (not visible here).
# Confirm that an inventory-retrieval job was started and a completion
# notification received before this point.
job_status_list = glacier_layer1.list_jobs(vault_name, completed = True, status_code = 'Succeeded');
for job_status in job_status_list['JobList']:
    if job_status['JobId'] == job_id:
        log('Glacier job completion confirmed.');
        confirmed = True;
        break;
log('Get Glacier job output.');
job_output = glacier_layer1.get_job_output(vault_name, job_id);
# The path variable is rebound to the open file object here; the JSON job
# output (including the archive inventory) is saved locally.
job_output_file = open(job_output_file, 'w');
job_output_file.write(json.dumps(job_output));
job_output_file.close();
log('Delete all archives in Glacier vault.');
for idx, archive_info in enumerate(job_output['ArchiveList']):
    if idx % 100 == 0:
        # per 100 id.
        # Re-create the Layer1 client periodically; long deletion runs were
        # observed to hit socket errors on a stale connection.
        log('Renew Glacier object to avoid socket error while archive deletion.');
        glacier_layer1 = Layer1(aws_access_key_id = aws_access_key_id, aws_secret_access_key = aws_secret_access_key, region_name = aws_region_name);
    try:
        glacier_layer1.delete_archive(vault_name, archive_info['ArchiveId']);
    except:
        # Deliberate best-effort: log the traceback and move on to the
        # next archive rather than aborting the whole cleanup.
        fail_msg = 'Fail to delete archive (%s) but Proceed to next.' % archive_info['ArchiveId'];
        log(fail_msg);
        log(traceback.format_exc());
        continue;
log('Delete SQS queue to clean up.');
sqs_con.delete_queue(sqs_queue);
log('Finish!');