Example #1
  def dumpSQSQueues(self):
    '''
      Method to dump SQS queues info.
    '''

    try:
      if self.botoprfl[0] != "default":
        conn = boto.connect_sqs(profile_name = self.botoprfl)
      else:
        conn = boto.connect_sqs()
      if conn:
        print("\n<Start of Dump SQS Queues>\n")
        self.opygenericroutines.prntLogErrWarnInfo('', 'info', bresume = True)
        for q in conn.get_all_queues():
          sq = " %s" %q.name
          self.opygenericroutines.prntLogErrWarnInfo(str(sq), 'info', bresume = True)
          self.opygenericroutines.prntLogErrWarnInfo("   Url: %s" % str(q.url), 'info', bresume = True)
          dq = q.get_attributes()
          for a in self.tsqsinfo:
            if a == 'CreatedTimestamp' or a == 'LastModifiedTimestamp':
              sa = "   %s: %s" %(str(a),time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(float(dq[a]))))
            else:
              sa = "   %s: %s" %(a, dq[a])
            self.opygenericroutines.prntLogErrWarnInfo(str(sa), 'info', bresume = True)
        self.opygenericroutines.prntLogErrWarnInfo('', 'info', bresume = True)
        print("\n<End of Dump SQS Queues>\n")
    except Exception as e:
      serr = ('%s :: dumpSQSQueues(...) : connect_sqs,get_all_queues(...), '
              '%s' %(self.sclsnme, str(e)))
      self.opygenericroutines.prntLogErrWarnInfo(serr, bresume = True)
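
A standalone sketch of the same idea (list every queue and print a few attributes). This is illustrative only and not part of the original class; the attribute names are standard SQS attributes, everything else is assumed:

# Illustrative sketch, not part of the original class.
import time
import boto

conn = boto.connect_sqs()
for q in conn.get_all_queues():
    print q.name, q.url
    attrs = q.get_attributes()
    for name in ('ApproximateNumberOfMessages', 'CreatedTimestamp', 'LastModifiedTimestamp'):
        value = attrs[name]
        if name in ('CreatedTimestamp', 'LastModifiedTimestamp'):
            value = time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(float(value)))
        print "  %s: %s" % (name, value)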
Example #2
def run(message):
   try_count = 0
   while True:
      if try_count != MAX_TRY:
         launch_result = launch_vm(message)
         #print launch_result
         result = json.loads(launch_result)
         if result['rescode'] == '1':
            sqs_conn = boto.connect_sqs()
            request_queue = sqs_conn.create_queue(REQUEST_QUEUE)
            request_queue.delete_message(message)
            print "Delete message from dev queue :("
            return 1
         elif result['rescode'] == '2':
            print 'job fail'
            try_count = try_count +1
      else:
         print try_count
         print "last job fail"
         sqs_conn = boto.connect_sqs()
         request_queue = sqs_conn.create_queue(REQUEST_QUEUE)
         request_queue.delete_message(message)
         new_m = Message()
         new_m.set_body('new')
         status = request_queue.write(new_m)
         print "Re-insert queue message (fail) :(" 
         return 2
Example #3
def test_worker_fills_internal_queue_from_celery_task():
    """
    Test read workers fill internal queue with celery tasks
    """
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    message = Message()
    body = '{"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfaW5jcmVtZW50ZXInCnAyCnNTJ2Fy\\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJwpwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\\nZ2UyJwpwOApzcy4=\\n", "some stuff": "asdfasf"}'
    message.set_body(body)
    queue.write(message)

    internal_queue = Queue()
    worker = ReadWorker(queue, internal_queue)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message2',
        },
    })
Example #4
def test_worker_fills_internal_queue_only_until_maximum_queue_size():
    """
    Test read workers fill internal queue only to maximum size
    """
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")
    queue.set_timeout(1)  # Set visibility timeout low to improve test speed

    message = Message()
    body = json.dumps({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    })
    message.set_body(body)
    for i in range(3):
        queue.write(message)

    internal_queue = Queue(maxsize=2)
    worker = ReadWorker(queue, internal_queue)
    worker.read_message()

    # The internal queue should only have two messages on it
    internal_queue.get(timeout=1)
    internal_queue.get(timeout=1)

    try:
        internal_queue.get(timeout=1)
    except Empty:
        pass
    else:
        raise AssertionError("The internal queue should be empty")
Example #5
def test_worker_fills_internal_queue():
    """
    Test read workers fill internal queue
    """

    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    message = Message()
    body = json.dumps({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    })
    message.set_body(body)
    queue.write(message)

    internal_queue = Queue()
    worker = ReadWorker(queue, internal_queue)
    worker.read_message()

    packed_message = internal_queue.get(timeout=1)
    found_message_body = decode_message(packed_message['message'])
    found_message_body.should.equal({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    })
Example #6
def test_worker_fills_internal_queue_and_respects_visibility_timeouts():
    """
    Test read workers respect visibility timeouts
    """
    # Setup logging
    logger = logging.getLogger("pyqs")
    logger.handlers.append(MockLoggingHandler())

    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")
    queue.set_timeout(1)

    # Add Messages
    message = Message()
    body = '{"body": "KGRwMApTJ3Rhc2snCnAxClMndGVzdHMudGFza3MuaW5kZXhfaW5jcmVtZW50ZXInCnAyCnNTJ2Fy\\nZ3MnCnAzCihscDQKc1Mna3dhcmdzJwpwNQooZHA2ClMnbWVzc2FnZScKcDcKUydUZXN0IG1lc3Nh\\nZ2UyJwpwOApzcy4=\\n", "some stuff": "asdfasf"}'
    message.set_body(body)
    queue.write(message)
    queue.write(message)
    queue.write(message)

    # Run Reader
    internal_queue = Queue(maxsize=1)
    worker = ReadWorker(queue, internal_queue)
    worker.read_message()

    # Check log messages
    logger.handlers[0].messages['warning'][0].should.contain("Timed out trying to add the following message to the internal queue")
    logger.handlers[0].messages['warning'][1].should.contain("Clearing Local messages since we exceeded their visibility_timeout")
Example #7
 def poll(self, wait=20, vtimeout=5):
     sqs = boto.sqs.connect_to_region('us-west-1')
     print '\n'
     print('- Client Queue Polling will begin in 100 seconds for any new messages.... %s' % self.clientQueueName)
     q = sqs.get_queue(self.clientQueueName)
     q.set_message_class(RawMessage)
     while True:
         time.sleep(100)
         #print('Will wait for reply from server for 100 seconds....')
         m = q.get_messages(10)
         #print('Num of msgs in my queue: %d' % len(m))
         #if there are messages in client q, read them and delete msgs
         if m:
             for result in m:
                 msg = json.loads(result.get_body())
                 fileName = msg.get('fileName')
                 fileHash = msg.get('fileHash')
                 if msg.get('restore') is not None:
                     print '- Files and location of files for restore:'
                     print msg.get('restore')
                 elif fileName is not None and fileHash is not None:
                     print '- Received list of files that need to be uploaded to S3 from server'
                     self.uploadFileToS3(fileName, fileHash)
                 q.delete_message(result)
                 print '- Done...Messages deleted from queue'
                 print ('\n')
         else:
             if len(m) == 0:
                 print ('\n')
                 print('- Writing to Server Q since nothing to read....')
                 self.sendMessageToServer()
Example #8
 def setUp(self):
   self.s3_connection = connect_s3()
   self.sdb_connection = connect_sdb()
   self.sns_connection = connect_sns()
   self.sqs_connection = connect_sqs()
   self.gpg = GPG()
   self.event_handler = LockboxEventHandler()
Example #9
  def __init__(self, config, transports):
    self.__transports = transports
    
    config_defaults = {
      'num_messages_to_get': 10, 
      'queue_wait_time': 20, 
      'max_events_threshold': 50,
      'json_messages': True
    }

    self.__config = dict(config_defaults.items() + config.items())

    # Lookup the region by name
    region = None
    for r in boto.sqs.regions():
      if r.name == config['aws_region']:
        region = r

    sqs_connection = boto.connect_sqs(
      region=region, 
      aws_access_key_id=config['aws_access_key_id'], 
      aws_secret_access_key=config['aws_secret_access_key'])

    self.queue = sqs_connection.get_queue(config['queue_name'])
    self.queue.set_message_class(RawMessage)

    if self.queue is None:
      logger.error("Could not find SQS queue %s" % config['queue_name'])
Example #10
def test_change_message_visibility_on_invalid_receipt():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=1)
    queue.set_message_class(RawMessage)

    queue.write(queue.new_message('this is another test message'))
    queue.count().should.equal(1)
    messages = conn.receive_message(queue, number_messages=1)

    assert len(messages) == 1

    original_message = messages[0]

    queue.count().should.equal(0)

    time.sleep(2)

    queue.count().should.equal(1)

    messages = conn.receive_message(queue, number_messages=1)

    assert len(messages) == 1

    original_message.change_visibility.when.called_with(
        100).should.throw(SQSError)
Example #11
File: get_log.py Project: jspring11/boto
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hl:o::',
                                   ['logqueue', 'logfilename'])
    except:
        usage()
    log_queue_name = None
    log_file_name = None
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
        if o in ('-l', '--logqueue'):
            log_queue_name = a
        if o in ('-o', '--logfilename'):
            log_file_name = a
    c = boto.connect_sqs()
    q = c.get_queue(log_queue_name)
    q.set_message_class(MHMessage)
    msgs = []
    msg = q.read()
    while msg:
        msgs.append(msg)
        q.delete_message(msg)
        msg = q.read()
    msgs.sort(compare_timestamps)
    fp = open(log_file_name, 'w')
    for msg in msgs:
        fp.write(msg.get_body())
        fp.write('\n-----------------------\n')
    fp.close()
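
The compare_timestamps helper used above is not shown in the snippet; a plausible sketch (assumed, not the original implementation) that relies on MHMessage's dict-style access to a Timestamp header would be:

# Assumed sketch only; get_log.py defines its own compare_timestamps.
def compare_timestamps(msg1, msg2):
    return cmp(msg1['Timestamp'], msg2['Timestamp'])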
Example #12
File: fabfile.py Project: rlisagor/mls
def setup_queues():
    """
    Create SQS queues for MLS app
    """
    sqs = boto.connect_sqs()
    sqs.create_queue('mls_parse_requests')
    sqs.create_queue('mls_fetcher')
Example #13
File: util.py Project: imclab/maptcha-v2
def connect_queue(key, secret, name):
    '''
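    Connect to SQS with the given credentials and return the named queue,
    creating it if it does not already exist.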
    '''
    sqs = connect_sqs(key, secret)
    queue = sqs.create_queue(name) #will create (and return) the requested queue if it does not exist or will return the existing queue if it does
    
    return queue
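
A minimal usage sketch for the helper above; the credentials and queue name here are placeholders, not values from the original project:

# Placeholder credentials and queue name, for illustration only.
from boto.sqs.message import RawMessage

queue = connect_queue('MY_ACCESS_KEY_ID', 'MY_SECRET_ACCESS_KEY', 'maptcha-work')
queue.set_message_class(RawMessage)

msg = RawMessage()
msg.set_body('{"job": "example"}')
queue.write(msg)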
Example #14
    def poll(self, wait=20, vtimeout=5):
        sqs = boto.sqs.connect_to_region('us-west-1')
        print('Polling after 1 min my client queue for any messages.... %s' % self.clientQueueName)
 #       time.sleep(60)
        q = sqs.get_queue(self.clientQueueName)
        q.set_message_class(RawMessage)
        while True:
            time.sleep(120)
            print('Waiting for server for 120 seconds....')
            m = q.get_messages(vtimeout)
            print('Num of msgs in my queue: %d' % len(m))
            #if there are messages in client q, read them and delete msgs
            if m:
                for result in m:
                    msg = json.loads(result.get_body())
                    print('MSG: %s' % msg.get('fileName'))
                    self.userFileName.append(msg.get('fileName'))
                    q.delete_message(result)
                    print 'filename: %s' % self.userFileName
                    if self.userFileName is not None:
                        # for fileName in self.userFileName:
                        #     print fileName
                        self.uploadFileToS3(self.userFileName)
                    print 'Done...Msgs deleted'
Example #15
File: test_sqs.py Project: pbcole/moto
def test_queue_attributes():
    conn = boto.connect_sqs("the_key", "the_secret")

    queue_name = "test-queue"
    visibility_timeout = 60

    queue = conn.create_queue(queue_name, visibility_timeout=visibility_timeout)

    attributes = queue.get_attributes()

    attributes["QueueArn"].should.look_like("arn:aws:sqs:sqs.us-east-1:123456789012:%s" % queue_name)

    attributes["VisibilityTimeout"].should.look_like(str(visibility_timeout))

    attribute_names = queue.get_attributes().keys()
    attribute_names.should.contain("ApproximateNumberOfMessagesNotVisible")
    attribute_names.should.contain("MessageRetentionPeriod")
    attribute_names.should.contain("ApproximateNumberOfMessagesDelayed")
    attribute_names.should.contain("MaximumMessageSize")
    attribute_names.should.contain("CreatedTimestamp")
    attribute_names.should.contain("ApproximateNumberOfMessages")
    attribute_names.should.contain("ReceiveMessageWaitTimeSeconds")
    attribute_names.should.contain("DelaySeconds")
    attribute_names.should.contain("VisibilityTimeout")
    attribute_names.should.contain("LastModifiedTimestamp")
    attribute_names.should.contain("QueueArn")
Example #16
def main():
    sqs = boto.connect_sqs()
    raw_input("I am now going to create two queues.")
    sqs.create_queue("test")
    sqs.create_queue("test2")
    raw_input("Two queues were made. I am now going to list all queues I own.")
    for x in sqs.get_all_queues():
        print x
    queueInfo()
    queueInfo()
    raw_input("I am now going to delete queue 2. \n")
    sqs.delete_queue(sqs.get_queue("test2"))
    raw_input("Queue deleted. Here are the queues that are left.")
    print sqs.lookup("test")
    print sqs.lookup("test2")
    raw_input("I am now going to add 3 messages to the queue: Test1, 2 and 3.")
    for x in range(1, 4):
        sqs.get_queue("test").write(sqs.get_queue("test").new_message("This is a Test" + str(x)))
    q1 = sqs.get_queue_attributes(sqs.get_queue("test"))
    for x, y in q1.items():
        if x == "ApproximateNumberOfMessages":
            print x, y
            print ""
    raw_input("I am now going to dequeue the queue one by one, then delete the queue.")
    print str(sqs.get_queue("test").read().get_body())
    raw_input("First message dequeued. Here are the next two.")
    print sqs.get_queue("test").read().get_body()
    print sqs.get_queue("test").read().get_body()
    raw_input("Queue empty. Now deleting the queue.")
    sqs.delete_queue(sqs.get_queue("test"))
    print sqs.lookup("test")
    print sqs.lookup("test2")
Example #17
def test_queue_attributes():
    conn = boto.connect_sqs('the_key', 'the_secret')

    queue_name = 'test-queue'
    visibility_timeout = 3

    queue = conn.create_queue(
        queue_name, visibility_timeout=visibility_timeout)

    attributes = queue.get_attributes()

    attributes['QueueArn'].should.look_like(
        'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name)

    attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout))

    attribute_names = queue.get_attributes().keys()
    attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible')
    attribute_names.should.contain('MessageRetentionPeriod')
    attribute_names.should.contain('ApproximateNumberOfMessagesDelayed')
    attribute_names.should.contain('MaximumMessageSize')
    attribute_names.should.contain('CreatedTimestamp')
    attribute_names.should.contain('ApproximateNumberOfMessages')
    attribute_names.should.contain('ReceiveMessageWaitTimeSeconds')
    attribute_names.should.contain('DelaySeconds')
    attribute_names.should.contain('VisibilityTimeout')
    attribute_names.should.contain('LastModifiedTimestamp')
    attribute_names.should.contain('QueueArn')
Example #18
def test_master_counts_processes():
    """
    Test managing process counts child processes
    """

    # Setup Logging
    logger = logging.getLogger("pyqs")
    del logger.handlers[:]
    logger.handlers.append(MockLoggingHandler())

    # Setup SQS Queue
    conn = boto.connect_sqs()
    conn.create_queue("tester")

    # Setup Manager
    manager = ManagerWorker(["tester"], 2)
    manager.start()

    # Check Workers
    manager.process_counts()

    # Cleanup
    manager.stop()

    # Check messages
    msg1 = "Reader Processes: 1"
    logger.handlers[0].messages['debug'][-2].lower().should.contain(msg1.lower())
    msg2 = "Worker Processes: 2"
    logger.handlers[0].messages['debug'][-1].lower().should.contain(msg2.lower())
Example #19
def connectAndWriteToSQS(queue, data):
    sqs = boto.sqs.connect_to_region('us-west-1')
    q = sqs.create_queue(queue)
    m = RawMessage()
    m.set_body(json.dumps(data))
    q.write(m)
Example #20
    def run(self, **kwargs):
        ec2 = boto.connect_ec2(settings.PDF_AWS_KEY, settings.PDF_AWS_SECRET)
        sqs = boto.connect_sqs(settings.PDF_AWS_KEY, settings.PDF_AWS_SECRET)

        queue = sqs.create_queue(REQUEST_QUEUE)
        num = queue.count()
        launched = 0
        icount = 0

        reservations = ec2.get_all_instances()
        for reservation in reservations:
            for instance in reservation.instances:
                if instance.state == "running" and instance.image_id == AMI_ID:
                    icount += 1
        to_boot = min(num - icount, MAX_INSTANCES)

        if to_boot > 0:
            startup = BOOTSTRAP_SCRIPT % {
                'KEY': settings.PDF_AWS_KEY,
                'SECRET': settings.PDF_AWS_SECRET,
                'RESPONSE_QUEUE': RESPONSE_QUEUE,
                'REQUEST_QUEUE': REQUEST_QUEUE}
            r = ec2.run_instances(
                image_id=AMI_ID,
                min_count=to_boot,
                max_count=to_boot,
                key_name=KEYPAIR,
                security_groups=SECURITY_GROUPS,
                user_data=startup)
            launched = len(r.instances)
        return launched
Example #21
 def __init__(self, **kwargs):
     self.canvas = kwargs.get('canvas')
     self.sqs = None
     try:
         self.sqs = boto.connect_sqs()
     except Exception as e:
         print e
Example #22
File: test_sqs.py Project: mansam/moto
def test_queue_length():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=60)

    conn.send_message(queue, 'this is a test message')
    conn.send_message(queue, 'this is another test message')
    queue.count().should.equal(2)
Example #23
	def delete_sqs_message(self, queue, message):
		"""
		Delete a message from an SQS queue
		"""
		conn = boto.connect_sqs(self.aws_id, self.aws_key)
		q = conn.create_queue(queue)
		q.delete_message(message)
Example #24
def test_master_replaces_worker_processes():
    """
    Test managing process replaces worker processes
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    conn.create_queue("tester")

    # Setup Manager
    manager = ManagerWorker(queue_prefixes=["tester"], worker_concurrency=1)
    manager.start()

    # Get Worker PID
    pid = manager.worker_children[0].pid

    # Kill Worker and wait to replace
    manager.worker_children[0].shutdown()
    time.sleep(0.1)
    manager.replace_workers()

    # Check Replacement
    manager.worker_children[0].pid.shouldnt.equal(pid)

    # Cleanup
    manager.stop()
Example #25
def wake_up_message_send():
   sqs_conn = boto.connect_sqs()
   request_queue = sqs_conn.create_queue(master_queue_name)
   rvm_host = get_rvm_hostname()
   for i in range(0, 5):
      try:
         log.debug("Attempting to get hostname")
         fp = urllib.urlopen('http://%s/latest/meta-data/local-hostname' % rvm_host)
         local_hostname = fp.read()
         fp.close()
         if local_hostname:
            break
      except IOError:
         pass

   for i in range(0, 5):
      try:
         log.debug("Attempting to get host IP")
         fp = urllib.urlopen('http://%s/latest/meta-data/local-ipv4' % rvm_host)
         local_ipaddress = fp.read()
         fp.close()
         if local_ipaddress:
            break
      except IOError:
         pass

   new_message = Message()
   msg = "MASTER|%s|%s" % (local_hostname, local_ipaddress)
   new_message.set_body(msg)
   status = request_queue.write(new_message)
   log.debug("sending message '%s'" % msg)
Example #26
def get_or_create_queue(queue_name):
    conn = boto.connect_sqs()
    queue = conn.get_queue(queue_name)
    if queue:
        return queue
    else:
        return conn.create_queue(queue_name)
Example #27
File: funcs.py Project: yuanzheng/BYU
def ApprovalProcess(msg):
    data = dict()
    # The messages received from the imageresult queue
    # ???? format ??? str or int
    data['imagekey'] = str(msg['imagekey'])
    data['imageheight'] = str(msg['imageheight'])
    data['imagewidth'] = str(msg['imagewidth'])

    # retrieve the rest of the data from SimpleDB
    domain = _getImageDomain()
    item = domain.get_item(msg['imagekey'])

    data['imageURL'] = item['imageurl']
    data['tag'] = item['tag']
    data['description'] = item['description']
    data['submituser'] = item['submituser']
    data['submitdate'] = item['submitdate']

    # Connect to SQS and create the approval process queue
    sqs = boto.connect_sqs(AWSKey, AWSSecret)
    q = sqs.create_queue(approvalprocessQueue)

    # Put the message in the queue
    # m is a raw message object;
    # the queue only accepts Message objects
    m = RawMessage()
    m.set_body(json.dumps(data))
    # write the message to the queue
    status = q.write(m)
Example #28
def test_change_message_visibility():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=2)
    queue.set_message_class(RawMessage)

    body_one = 'this is another test message'
    queue.write(queue.new_message(body_one))

    queue.count().should.equal(1)
    messages = conn.receive_message(queue, number_messages=1)

    assert len(messages) == 1

    queue.count().should.equal(0)

    messages[0].change_visibility(2)

    # Wait
    time.sleep(1)

    # Message is not visible
    queue.count().should.equal(0)

    time.sleep(2)

    # Message now becomes visible
    queue.count().should.equal(1)

    messages = conn.receive_message(queue, number_messages=1)
    messages[0].delete()
    queue.count().should.equal(0)
Example #29
def test_master_handles_signals(sys):
    """
    Test managing process handles OS signals
    """

    # Setup SQS Queue
    conn = boto.connect_sqs()
    conn.create_queue("tester")

    # Mock out sys.exit
    sys.exit = Mock()

    # Have our inner method send our signal
    def process_counts():
        os.kill(os.getpid(), signal.SIGTERM)

    # Setup Manager
    manager = ManagerWorker(queue_prefixes=["tester"], worker_concurrency=1)
    manager.process_counts = process_counts
    manager._graceful_shutdown = MagicMock()

    # When we start and trigger a signal
    manager.start()
    manager.sleep()

    # Then we exit
    sys.exit.assert_called_once_with(0)
Example #30
def run(args):
    """
    0. check 'module_name'
    1. create private SQS queue
    2. subscribe queue to the specified 'topic'
    """
    enable_json = args.enable_json
    module_name = args.module_name
    enable_call_run = True if module_name.lower() != "none" else False
    batch_size = args.batch_size
   
    logging.info("module_name=      %s" % module_name)    
    logging.info("batch_size=       %s" % batch_size)
    logging.info("polling_interval= %s (seconds)" % args.polling_interval)
    logging.info("json to stdout=   %s" % enable_json)
        
    # SETUP PRIVATE QUEUE
    try:
        conn = boto.connect_sqs()
        queue_name = gen_queue_name()
        q = conn.create_queue(queue_name)
        q.set_message_class(JSONMessage)

    except Exception as e:
        raise Exception("Creating queue '%s': %s" % (queue_name, str(e)))
Example #31
def test_send_batch_operation():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=60)

    # See https://github.com/boto/boto/issues/831
    queue.set_message_class(RawMessage)

    queue.write_batch([
        ("my_first_message", 'test message 1', 0),
        ("my_second_message", 'test message 2', 0),
        ("my_third_message", 'test message 3', 0),
    ])

    messages = queue.get_messages(3)
    messages[0].get_body().should.equal("test message 1")

    # Test that pulling more messages doesn't break anything
    messages = queue.get_messages(2)
Example #32
def test_send_message_with_delay():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=60)
    queue.set_message_class(RawMessage)

    body_one = 'this is a test message'
    body_two = 'this is another test message'

    queue.write(queue.new_message(body_one), delay_seconds=60)
    queue.write(queue.new_message(body_two))

    queue.count().should.equal(1)

    messages = conn.receive_message(queue, number_messages=2)
    assert len(messages) == 1
    message = messages[0]
    assert message.get_body().should.equal(body_two)
    queue.count().should.equal(0)
Example #33
def test_read_worker_with_parent_process_alive_and_should_exit(os):
    """
    Test read workers exit when parent is alive and shutdown is set
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Setup PPID
    os.getppid.return_value = 1234

    # When I have a parent process, and shutdown is set
    worker = ReadWorker(queue, "foo")
    worker.read_message = Mock()
    worker.shutdown()

    # Then I return from run()
    worker.run().should.be.none
Example #34
    def __init__(self, profile, queue, hook, dry_run, bin_directory=None):
        logging.basicConfig(level=logging.INFO)
        self.queue = queue
        self.hook = hook
        self.profile = profile
        if bin_directory:
            os.environ["PATH"] = bin_directory + os.pathsep + os.environ["PATH"]
        self.aws_bin = spawn.find_executable('aws')
        self.python_bin = spawn.find_executable('python')

        self.base_cli_command ="{python_bin} {aws_bin} --profile {profile} ".format(
            python_bin=self.python_bin,
            aws_bin=self.aws_bin,
            profile=self.profile)

        self.dry_run = dry_run
        self.ec2_con = boto.connect_ec2()
        self.sqs_con = boto.connect_sqs()
Example #35
def test_message_becomes_inflight_when_received():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=2)
    queue.set_message_class(RawMessage)

    body_one = 'this is a test message'
    queue.write(queue.new_message(body_one))
    queue.count().should.equal(1)

    messages = conn.receive_message(queue, number_messages=1)
    queue.count().should.equal(0)

    assert len(messages) == 1

    # Wait
    time.sleep(3)

    queue.count().should.equal(1)
Example #36
def test_delete_message():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=60)
    queue.set_message_class(RawMessage)

    queue.write(queue.new_message('this is a test message'))
    queue.write(queue.new_message('this is another test message'))
    queue.count().should.equal(2)

    messages = conn.receive_message(queue, number_messages=1)
    assert len(messages) == 1
    messages[0].delete()
    queue.count().should.equal(1)

    messages = conn.receive_message(queue, number_messages=1)
    assert len(messages) == 1
    messages[0].delete()
    queue.count().should.equal(0)
Example #37
def test_send_batch_operation_with_message_attributes():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=60)
    queue.set_message_class(RawMessage)

    message_tuple = ("my_first_message", 'test message 1', 0, {
        'name1': {
            'data_type': 'String',
            'string_value': 'foo'
        }
    })
    queue.write_batch([message_tuple])

    messages = queue.get_messages()
    messages[0].get_body().should.equal("test message 1")

    for name, value in message_tuple[3].items():
        dict(messages[0].message_attributes[name]).should.equal(value)
Example #38
def test_publish_to_sqs():
    conn = boto.connect_sns()
    conn.create_topic("some-topic")
    topics_json = conn.get_all_topics()
    topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"][
        "Topics"][0]['TopicArn']

    sqs_conn = boto.connect_sqs()
    sqs_conn.create_queue("test-queue")

    conn.subscribe(topic_arn, "sqs",
                   "arn:aws:sqs:us-east-1:123456789012:test-queue")

    conn.publish(topic=topic_arn, message="my message")

    queue = sqs_conn.get_queue("test-queue")
    message = queue.read(1)
    message.get_body().should.equal('my message')
Example #39
def test_worker_processes_discard_tasks_that_exceed_their_visibility_timeout():
    """
    Test worker processes discards tasks that exceed their visibility timeout
    """
    # Setup logging
    logger = logging.getLogger("pyqs")
    del logger.handlers[:]
    logger.handlers.append(MockLoggingHandler())

    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 23,
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue with timeout of 0 that started long ago
    internal_queue = Queue()
    internal_queue.put({
        "queue": queue.id,
        "message": message,
        "start_time": 0,
        "timeout": 0
    })

    # When I process the message
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker.process_message()

    # Then I get an error about exceeding the visibility timeout
    kwargs = json.loads(body)['kwargs']
    msg1 = "Discarding task tests.tasks.index_incrementer with args: [] and kwargs: {} due to exceeding visibility timeout".format(
        kwargs)  # noqa
    logger.handlers[0].messages['warning'][0].lower().should.contain(
        msg1.lower())
Example #40
def test_worker_processes_tasks_from_internal_queue():
    """
    Test worker processes read from internal queue
    """
    del task_results[:]

    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to queue
    internal_queue = Queue()
    internal_queue.put({
        "message": message,
        "queue": queue.id,
        "start_time": time.time(),
        "timeout": 30
    })

    # Process message
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker.process_message()

    task_results.should.equal(['Test message'])

    # We expect the queue to be empty now
    try:
        internal_queue.get(timeout=1)
    except Empty:
        pass
    else:
        raise AssertionError("The internal queue should be empty")
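
Several of these tests refer to tests.tasks.index_incrementer and task_results, which are not shown here; a plausible shape for that task, inferred from the assertions (it appends string messages to task_results and raises ValueError on non-string input), would be:

# Assumed sketch of tests/tasks.py from the pyqs test suite, inferred from
# the assertions in these examples; not the verbatim original.
from pyqs import task

task_results = []

@task()
def index_incrementer(message, **kwargs):
    if not isinstance(message, basestring):
        raise ValueError(
            "Need to be given basestring, was given {}".format(message))
    task_results.append(message)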
Example #41
def test_worker_processes_tasks_and_logs_correctly():
    """
    Test worker processes logs INFO correctly
    """
    # Setup logging
    logger = logging.getLogger("pyqs")
    del logger.handlers[:]
    logger.handlers.append(MockLoggingHandler())

    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue()
    internal_queue.put({
        "queue": queue.id,
        "message": message,
        "start_time": time.time(),
        "timeout": 30
    })

    # Process message
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker.process_message()

    # Check output
    kwargs = json.loads(body)['kwargs']
    expected_result = u"Processed task tests.tasks.index_incrementer in 0.0000 seconds with args: [] and kwargs: {}".format(
        kwargs)
    logger.handlers[0].messages['info'].should.equal([expected_result])
Example #42
def test_read_worker_with_parent_process_dead_and_should_not_exit(os):
    """
    Test read workers exit when parent is dead and shutdown is not set
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Setup PPID
    os.getppid.return_value = 1

    # Setup internal queue
    q = Queue(1)

    # When I have no parent process, and shutdown is not set
    worker = ReadWorker(queue, q, BATCHSIZE)
    worker.read_message = Mock()

    # Then I return from run()
    worker.run().should.be.none
Example #43
def test_change_message_visibility_on_visible_message():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=1)
    queue.set_message_class(RawMessage)

    queue.write(queue.new_message('this is another test message'))
    queue.count().should.equal(1)
    messages = conn.receive_message(queue, number_messages=1)

    assert len(messages) == 1

    original_message = messages[0]

    queue.count().should.equal(0)

    time.sleep(2)

    queue.count().should.equal(1)

    original_message.change_visibility.when.called_with(100).should.throw(SQSError)
Example #44
def make_multi_sqs_queue(queue_names,
                         get_queue_name_for_zoom,
                         redis_client,
                         is_seeding=False,
                         aws_access_key_id=None,
                         aws_secret_access_key=None):

    conn = connect_sqs(aws_access_key_id, aws_secret_access_key)

    sqs_queues = []
    for queue_name in queue_names:
        aws_queue = conn.get_queue(queue_name)
        assert aws_queue is not None, \
            'Could not get sqs queue with name: %s' % queue_name
        aws_queue.set_message_class(RawMessage)
        sqs_queues.append(aws_queue)

    result = MultiSqsQueue(sqs_queues, get_queue_name_for_zoom, redis_client,
                           is_seeding)
    return result
Example #45
def test_custom_function_path():
    """
    Test delaying task with custom function path
    """
    conn = boto.connect_sqs()

    custom_path_task.delay()

    all_queues = conn.get_all_queues()
    queue = all_queues[0]
    queue.name.should.equal("foobar")
    queue.count().should.equal(1)

    message = queue.get_messages()[0].get_body()
    message_dict = json.loads(message)
    message_dict.should.equal({
        'task': 'custom_function.path',
        'args': [],
        'kwargs': {},
    })
Example #46
def test_manager_worker_with_queue_prefix():
    """
    Test managing process can find queues by prefix
    """
    conn = boto.connect_sqs()
    conn.create_queue("email.foobar")
    conn.create_queue("email.baz")

    manager = ManagerWorker(queue_prefixes=['email.*'],
                            worker_concurrency=1,
                            interval=1,
                            batchsize=10)

    len(manager.reader_children).should.equal(2)
    children = manager.reader_children
    # Pull all the read children and sort by name to make testing easier
    sorted_children = sorted(children, key=lambda child: child.sqs_queue.name)

    sorted_children[0].sqs_queue.name.should.equal("email.baz")
    sorted_children[1].sqs_queue.name.should.equal("email.foobar")
Example #47
def watch_cluster():
    """Give real-time updates on what is happening aboard our cluster"""
    #Make it pretty, or pretty trippy
    range_plus = lambda ri, re, s: [str(i) + s for i in range(ri, re)]
    styles = [
        "\033[" + ''.join(style)
        for style in itertools.product(range_plus(
            0, 3, ';'), range_plus(30, 38, ';'), range_plus(40, 48, 'm'))
    ]
    #Make it work
    sqs = boto.connect_sqs()
    status_queue = sqs.get_queue(STATUS_QUEUE)
    ec2 = boto.connect_ec2()
    print("Starting cluster watch, ^c to stop")
    while True:  #quit via ^C
        try:
            # Gather and report messages
            if status_queue.count() > 0:
                while True:
                    msg = status_queue.read()
                    body = msg.get_body()
                    last_ip = int(body.split('-')[1].split('.')[-1])
                    style = styles[last_ip % len(styles)]
                    print(style + body)
                    status_queue.delete_message(msg)
            # Make sure some instances are running
            running_instances = ec2.get_all_instances(
                filters={'instance-state-code': [0, 16]})
            if len(running_instances) == 0:
                print("\nNo running instances found")
                break
            # Don't hammer the connection
            time.sleep(3)
        except KeyboardInterrupt:  #^c pressed
            print("\nMy watch has ended")
            break
        except AttributeError:  #no message to read body from
            pass
Example #48
def test_message_attributes():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=2)
    queue.set_message_class(RawMessage)

    body_one = 'this is another test message'
    queue.write(queue.new_message(body_one))

    queue.count().should.equal(1)

    messages = conn.receive_message(queue, number_messages=1)
    queue.count().should.equal(0)

    assert len(messages) == 1

    message_attributes = messages[0].attributes

    assert message_attributes.get('ApproximateFirstReceiveTimestamp')
    assert int(message_attributes.get('ApproximateReceiveCount')) == 1
    assert message_attributes.get('SentTimestamp')
    assert message_attributes.get('SenderId')
Example #49
def test_read_worker_with_parent_process_alive_and_should_not_exit(os):
    """
    Test read workers do not exit when parent is alive and shutdown is not set
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Setup PPID
    os.getppid.return_value = 1234

    # Setup dummy read_message
    def read_message():
        raise Exception("Called")

    # When I have a parent process, and shutdown is not set
    worker = ReadWorker(queue, "foo", BATCHSIZE)
    worker.read_message = read_message

    # Then read_message() is reached
    worker.run.when.called_with().should.throw(Exception, "Called")
Example #50
def test_publish_to_sqs():
    conn = boto.connect_sns()
    conn.create_topic("some-topic")
    topics_json = conn.get_all_topics()
    topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0][
        "TopicArn"
    ]

    sqs_conn = boto.connect_sqs()
    sqs_conn.create_queue("test-queue")

    conn.subscribe(
        topic_arn, "sqs", "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID)
    )

    message_to_publish = "my message"
    subject_to_publish = "test subject"
    with freeze_time("2015-01-01 12:00:00"):
        published_message = conn.publish(
            topic=topic_arn, message=message_to_publish, subject=subject_to_publish
        )
    published_message_id = published_message["PublishResponse"]["PublishResult"][
        "MessageId"
    ]

    queue = sqs_conn.get_queue("test-queue")
    with freeze_time("2015-01-01 12:00:01"):
        message = queue.read(1)
    expected = MESSAGE_FROM_SQS_TEMPLATE % (
        message_to_publish,
        published_message_id,
        subject_to_publish,
        "us-east-1",
    )
    acquired_message = re.sub(
        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
        "2015-01-01T12:00:00.000Z",
        message.get_body(),
    )
    acquired_message.should.equal(expected)
Example #51
def test_worker_processes_tasks_and_logs_warning_correctly():
    """
    Test worker processes logs WARNING correctly
    """
    # Setup logging
    logger = logging.getLogger("pyqs")
    del logger.handlers[:]
    logger.handlers.append(MockLoggingHandler())

    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 23,
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue()
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    # Process message
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker.process_message()

    # Check output
    kwargs = json.loads(body)['kwargs']
    msg1 = "Task tests.tasks.index_incrementer raised error in 0.0000 seconds: with args: [] and kwargs: {}: Traceback (most recent call last)".format(kwargs)  # noqa
    logger.handlers[0].messages['error'][0].lower().should.contain(msg1.lower())
    msg2 = 'raise ValueError("Need to be given basestring, was given {}".format(message))\nValueError: Need to be given basestring, was given 23'  # noqa
    logger.handlers[0].messages['error'][0].lower().should.contain(msg2.lower())
Example #52
def test_master_spawns_worker_processes():
    """
    Test managing process creates child workers
    """

    # Setup SQS Queue
    conn = boto.connect_sqs()
    conn.create_queue("tester")

    # Setup Manager
    manager = ManagerWorker(["tester"], 1)
    manager.start()

    # Check Workers
    len(manager.reader_children).should.equal(1)
    len(manager.worker_children).should.equal(1)

    manager.reader_children[0].is_alive().should.be.true
    manager.worker_children[0].is_alive().should.be.true

    # Cleanup
    manager.stop()
Example #53
def test_worker_processes_only_increases_processed_counter_if_a_message_was_processed():
    """
    Test worker process only increases processed counter if a message was processed
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 23,
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    # And we add a message to the queue later
    def sleep_and_queue(internal_queue):
        time.sleep(1)
        internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    thread = threading.Thread(target=sleep_and_queue, args=(internal_queue,))
    thread.daemon = True
    thread.start()

    # When I Process messages
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run() after processing 2 messages
    worker.run().should.be.none
Example #54
def test_send_message_with_attributes():
    conn = boto.connect_sqs('the_key', 'the_secret')
    queue = conn.create_queue("test-queue", visibility_timeout=60)
    queue.set_message_class(RawMessage)

    body = 'this is a test message'
    message = queue.new_message(body)
    message_attributes = {
        'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'},
        'test.binary_attribute': {'data_type': 'Binary', 'binary_value': 'binary value'},
        'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'}
    }
    message.message_attributes = message_attributes

    queue.write(message)

    messages = conn.receive_message(queue)

    messages[0].get_body().should.equal(body)

    for name, value in message_attributes.items():
        dict(messages[0].message_attributes[name]).should.equal(value)
Example #55
    def test_1_basic(self):
        c = boto.connect_sqs()

        # create a queue so we can test BigMessage
        queue_name = 'test%d' % int(time.time())
        timeout = 60
        queue = c.create_queue(queue_name, timeout)
        self.addCleanup(c.delete_queue, queue, True)
        queue.set_message_class(BigMessage)

        # create a bucket with the same name to store the message in
        s3 = boto.connect_s3()
        bucket = s3.create_bucket(queue_name)
        self.addCleanup(s3.delete_bucket, queue_name)
        time.sleep(30)

        # now add a message
        msg_body = 'This is a test of the big message'
        fp = StringIO(msg_body)
        s3_url = 's3://%s' % queue_name
        message = queue.new_message(fp, s3_url=s3_url)

        queue.write(message)
        time.sleep(30)

        s3_object_name = message.s3_url.split('/')[-1]

        # Make sure msg body is in bucket
        self.assertTrue(bucket.lookup(s3_object_name))

        m = queue.read()
        self.assertEqual(m.get_body().decode('utf-8'), msg_body)

        m.delete()
        time.sleep(30)

        # Make sure msg is deleted from bucket
        self.assertIsNone(bucket.lookup(s3_object_name))
Example #56
def test_basic_delay():
    """
    Test delaying task to default queue
    """
    conn = boto.connect_sqs()
    conn.create_queue("tests.tasks.index_incrementer")

    index_incrementer.delay("foobar", **{'extra': 'more'})

    all_queues = conn.get_all_queues()
    len(all_queues).should.equal(1)

    queue = all_queues[0]
    queue.name.should.equal("tests.tasks.index_incrementer")
    queue.count().should.equal(1)

    message = queue.get_messages()[0].get_body()
    message_dict = json.loads(message)
    message_dict.should.equal({
        'task': 'tests.tasks.index_incrementer',
        'args': ["foobar"],
        'kwargs': {'extra': 'more'},
    })
Example #57
def test_manager_start_and_stop():
    """
    Test managing process can start and stop child processes
    """
    conn = boto.connect_sqs()
    conn.create_queue("email")

    manager = ManagerWorker(queue_prefixes=['email'], worker_concurrency=2)

    len(manager.worker_children).should.equal(2)

    manager.worker_children[0].is_alive().should.equal(False)
    manager.worker_children[1].is_alive().should.equal(False)

    manager.start()

    manager.worker_children[0].is_alive().should.equal(True)
    manager.worker_children[1].is_alive().should.equal(True)

    manager.stop()

    manager.worker_children[0].is_alive().should.equal(False)
    manager.worker_children[1].is_alive().should.equal(False)
Example #58
def test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages():
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 23,
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    # When I Process messages
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Example #59
import simplejson, boto, uuid
sqs = boto.connect_sqs()
q = sqs.create_queue('my_msg_pump')
q.write(q.new_message(body=simplejson.dumps(['foo'])))
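
A matching read side for the pump above (not part of the original snippet), assuming the same 'my_msg_pump' queue:

# Illustrative counterpart to the write-side pump above.
import simplejson, boto
sqs = boto.connect_sqs()
q = sqs.get_queue('my_msg_pump')
m = q.read()
if m is not None:
    print simplejson.loads(m.get_body())
    q.delete_message(m)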
Example #60
 def __init__(self, queue_name):
     self.sqs = boto.connect_sqs()
     self.queue = self.sqs.lookup(queue_name)