Example #1
    def test_queue_deletion_affects_full_queues(self):
        conn = SQSConnection()
        initial_count = len(conn.get_all_queues())

        empty = conn.create_queue('empty%d' % int(time.time()))
        full = conn.create_queue('full%d' % int(time.time()))
        time.sleep(60)
        # Make sure they're both around.
        self.assertEqual(len(conn.get_all_queues()), initial_count + 2)

        # Put a message in the full queue.
        m1 = Message()
        m1.set_body('This is a test message.')
        full.write(m1)
        self.assertEqual(full.count(), 1)

        self.assertTrue(conn.delete_queue(empty))
        # Here's the regression for the docs. SQS will delete a queue with
        # messages in it, no ``force_deletion`` needed.
        self.assertTrue(conn.delete_queue(full))
        # Wait long enough for SQS to finally remove the queues.
        time.sleep(90)
        self.assertEqual(len(conn.get_all_queues()), initial_count)
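The fixed time.sleep(60) and time.sleep(90) waits make this test slow and still leave it racy, since SQS propagates creates and deletes asynchronously. A minimal polling helper in the same style (poll_queue_count and its timings are illustrative assumptions, not part of the original test):

def poll_queue_count(conn, expected, timeout=120, interval=5):
    # Re-check get_all_queues() until the expected count appears
    # or the deadline passes.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(conn.get_all_queues()) == expected:
            return True
        time.sleep(interval)
    return False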
Example #2
def test_worker_processes_discard_tasks_that_exceed_their_visibility_timeout():
    """
    Test that worker processes discard tasks that exceed their visibility timeout
    """
    # Setup logging
    logger = logging.getLogger("pyqs")
    del logger.handlers[:]
    logger.handlers.append(MockLoggingHandler())

    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 23,
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue with timeout of 0 that started long ago
    internal_queue = Queue()
    internal_queue.put({"queue": queue.id, "message": message, "start_time": 0, "timeout": 0})

    # When I process the message
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker.process_message()

    # Then I get an error about exceeding the visibility timeout
    kwargs = json.loads(body)['kwargs']
    msg1 = "Discarding task tests.tasks.index_incrementer with args: [] and kwargs: {} due to exceeding visibility timeout".format(kwargs)  # noqa
    logger.handlers[0].messages['warning'][0].lower().should.contain(msg1.lower())
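The discard decision being exercised here is essentially an elapsed-time check against the stored start_time and timeout; conceptually (a sketch of the condition, not pyqs's actual source):

def exceeds_visibility_timeout(entry):
    # entry is one of the dicts placed on the internal queue above;
    # start_time=0 with timeout=0 always trips this check.
    return time.time() - entry["start_time"] >= entry["timeout"]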
Example #3
def test_worker_processes_tasks_and_logs_correctly():
    """
    Test that worker processes log INFO messages correctly
    """
    # Setup logging
    logger = logging.getLogger("pyqs")
    del logger.handlers[:]
    logger.handlers.append(MockLoggingHandler())

    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue()
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    # Process message
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker.process_message()

    # Check output
    kwargs = json.loads(body)['kwargs']
    expected_result = u"Processed task tests.tasks.index_incrementer in 0.0000 seconds with args: [] and kwargs: {}".format(kwargs)
    logger.handlers[0].messages['info'].should.equal([expected_result])
Example #4
def readtaskSQS(filename, queue_name, process_queue):
    aws_conn = boto.sqs.connect_to_region(
        "us-east-1",
        aws_access_key_id='{aws_access_key_id}',
        aws_secret_access_key='{aws_secret_access_key}')
    dynamo_conn = boto.dynamodb.connect_to_region(
        "us-east-1",
        aws_access_key_id='{aws_access_key_id}',
        aws_secret_access_key='{aws_secret_access_key}')
    SQS_queue = aws_conn.get_queue(queue_name)
    SQS_process_queue = aws_conn.get_queue(process_queue)
    task_id = 0

    try:
        task_table_schema = dynamo_conn.create_schema(hash_key_name='task_id',
                                                      hash_key_proto_value=str)
        table = dynamo_conn.create_table(name='Dynamo_Table',
                                         schema=task_table_schema,
                                         read_units=10,
                                         write_units=10)
        print 'Table Dynamo_Table has been created'
    except Exception as e:
        print 'Dynamo_Table already exists.'

    with open(filename) as f:
        task_list = f.readlines()

    for i in task_list:
        msg = Message()
        json_msg = {}
        json_msg["task_id"] = task_id
        json_msg["task"] = i
        msg.set_body(json.dumps(json_msg))
        SQS_queue.write(msg)
        task_id = task_id + 1

    return SQS_queue, SQS_process_queue, task_id
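For context, a minimal sketch of the consuming side of this producer (drain_tasks is an assumed name; it decodes the JSON bodies that readtaskSQS writes):

def drain_tasks(sqs_queue):
    messages = sqs_queue.get_messages(10)
    while messages:
        for msg in messages:
            # Decode the {"task_id": ..., "task": ...} body written above.
            task = json.loads(msg.get_body())
            print task['task_id'], task['task']
            sqs_queue.delete_message(msg)
        messages = sqs_queue.get_messages(10)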
Example #5
def broadcast_alert(ipaddr, ipport, mx_pk):
    from boto.sqs.connection import SQSConnection
    from boto.sqs.message import Message

    conn = SQSConnection('ID', 'KEY')

    sg_obj = ProbeStatus.objects.get(probe='sg')
    uk_obj = ProbeStatus.objects.get(probe='uk')
    us_obj = ProbeStatus.objects.get(probe='us')

    q_uk = conn.get_queue('helomx_to_uk')
    q_sg = conn.get_queue('helomx_to_sg')
    q_us = conn.get_queue('helomx_to_us')

    m = Message()
    host_to_send = "%s:%s:%s" % (ipaddr, ipport, mx_pk)
    m.set_body(host_to_send)

    if uk_obj.status == 'up':
        q_uk.write(m)
    if sg_obj.status == 'up':
        q_sg.write(m)
    if us_obj.status == 'up':
        q_us.write(m)
Example #6
def test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages():
    """
    Test that worker processes shut down after processing their maximum number of messages
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 23,
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    # When I Process messages
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Example #7
def imageProcess(cID, jID):
    try:
        os.system("./image.sh")
        file_name = str(cID) + str(jID) + ".mpg"
        tempcommand = 'mv output.mpg ' + file_name
        os.system(tempcommand)
        print 'Uploading File'
        k = Key(bucket)
        k.key = file_name
        k.set_contents_from_filename(file_name)
        k.set_canned_acl('public-read')
        url = k.generate_url(0, query_auth=False, force_http=True)
        print url
        m1 = Message()
        j = responseMessage(cID, jID, url)
        m1.set_body(j)
        processed_queue.write(m1)
        tempcommand = 'rm -rf ' + file_name
        os.system(tempcommand)
        global imageCount
        imageCount = 0
    except Exception as e:
        print 'Interrupted' + str(e)
        imageCount = 0
Example #8
    def add(self, message):
        message = to_data(message)
        m = Message()
        m.set_body(value2json(message))
        self.queue.write(m)
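A sketch of the matching read path for this wrapper (pop is an assumed name; the original only shows add, and json.loads stands in here for the inverse of value2json):

    def pop(self):
        # Read one message back and decode the JSON body written by add().
        m = self.queue.read()
        if m is None:
            return None
        payload = json.loads(m.get_body())
        self.queue.delete_message(m)
        return payload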
Example #9
def sqsput(filename):
    print filename
    m = Message()
    m.set_body(filename)
    q.write(m)
Example #10
def write_message_to_queue(queue):
    m = Message()
    m.set_body(ipgetter.myip())
    queue.write(m)
    print m
Example #11
    def do_activity(self, data=None):
        """
        Do the work
        """
        if self.logger:
            self.logger.info('data: %s' %
                             json.dumps(data, sort_keys=True, indent=4))

        run = data['run']
        session = Session(self.settings)
        version = session.get_value(run, 'version')
        article_id = session.get_value(run, 'article_id')

        self.emit_monitor_event(
            self.settings, article_id, version, run, self.pretty_name, "start",
            "Starting preparation of article for EIF " + article_id)

        try:
            eif_location = session.get_value(run, 'eif_location')
            eif_bucket = self.settings.publishing_buckets_prefix + self.settings.eif_bucket

            article_path = session.get_value(run, 'article_path')
            self.set_monitor_property(self.settings,
                                      article_id,
                                      'path',
                                      article_path,
                                      'text',
                                      version=version)

            expanded_folder = session.get_value(run, 'expanded_folder')
            status = session.get_value(run, 'status')

            update_date = session.get_value(run, 'update_date')

            carry_over_data = {
                'eif_location': eif_location,
                'eif_bucket': eif_bucket,
                'passthrough': {
                    'article_id': article_id,
                    'version': version,
                    'run': run,
                    'article_path': article_path,
                    'expanded_folder': expanded_folder,
                    'status': status,
                    'update_date': update_date,
                }
            }

            message = carry_over_data

            sqs_conn = boto.sqs.connect_to_region(
                self.settings.sqs_region,
                aws_access_key_id=self.settings.aws_access_key_id,
                aws_secret_access_key=self.settings.aws_secret_access_key)

            out_queue = sqs_conn.get_queue(self.settings.website_ingest_queue)
            m = Message()
            m.set_body(json.dumps(message))
            out_queue.write(m)

            #########

        except Exception as e:
            self.logger.exception("Exception when Preparing for PostEIF")
            self.emit_monitor_event(
                self.settings, article_id, version, run, self.pretty_name,
                "error", "Error submitting EIF For article" + article_id +
                " message:" + str(e.message))
            return False

        self.emit_monitor_event(
            self.settings, article_id, version, run, self.pretty_name, "end",
            "Finished preparation of article for EIF " + article_id)
        return True
Example #12
    def _put(self, queue, message, **kwargs):
        """Put message onto queue."""
        q = self._new_queue(queue)
        m = Message()
        m.set_body(dumps(message))
        q.write(m)
Example #13
    def publish(self, events):
        for event in events:
            for name, queue in self.queues.iteritems():
                queue.write(Message(body=json.dumps(event)))
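Passing body= to the Message constructor, as here, is shorthand for building an empty Message and calling set_body, which is what most of the other examples do:

m = Message(body=json.dumps(event))
# ...is equivalent to:
m = Message()
m.set_body(json.dumps(event))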
Example #14
q1 = c.create_queue(qnm1)  #connection to the queue is established
st = time.time()  #start time is noted
m = q.get_messages(1)  #message is fetched
c1 = boto.dynamodb.connect_to_region('us-west-2',
                                     aws_access_key_id=k_id,
                                     aws_secret_access_key=key)
t = c1.get_table(tnm)  #connection to the db is established
data = {'value': 'Done'}  #data is set
while (len(m) != 0):  #checking whether the queue is empty

    i = (m[0].get_body())
    k = i.split()  #the message is split to extract id
    #print (i)
    try:
        t.get_item(hash_key=k[0])  #trying to fetch value from db
    except boto.dynamodb.exceptions.DynamoDBKeyNotFoundError:
        #print "New Item Found"
        item = t.new_item(hash_key=k[0], attrs=data)
        m1 = Message()
        m1.set_body(k[0] + " " + "T")
        q1.write(m1)  #data put to response queue
        item.put()
        q.delete_message(m[0])
        #delete message from instruction queue
        os.system(k[1] + ' ' + str(float(k[2]) / 1000))
        #the instruction is executed

    m = q.get_messages(1)
en = time.time()
print(en - st)  # the time taken is displayed
Example #15
def test_master_shuts_down_busy_process_workers():
    """
    Test managing process properly cleans up busy Process Workers
    """
    # For debugging test
    import sys
    logger = logging.getLogger("pyqs")
    logger.setLevel(logging.DEBUG)
    stdout_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stdout_handler)

    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Add Slow tasks
    message = Message()
    body = json.dumps({
        'task': 'tests.tasks.sleeper',
        'args': [],
        'kwargs': {
            'message': 5,
        },
    })
    message.set_body(body)

    # Fill the queue (we need a lot of messages to trigger the bug)
    for _ in range(20):
        queue.write(message)

    # Create function to watch and kill stuck processes
    def sleep_and_kill(pid):
        import os
        import signal
        import time
        # This sleep time is long enough for the messages in the queue
        time.sleep(5)
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError:
            # Return that we didn't need to kill the process
            return True
        else:
            # Return that we needed to kill the process
            return False

    # Setup Manager
    manager = ManagerWorker(queue_prefixes=["tester"],
                            worker_concurrency=1,
                            interval=0.0,
                            batchsize=1)
    manager.start()

    # Give our processes a moment to start
    time.sleep(1)

    # Setup Threading watcher
    try:
        # Try Python 2 Style
        thread = ThreadWithReturnValue2(
            target=sleep_and_kill, args=(manager.reader_children[0].pid, ))
        thread.daemon = True
    except TypeError:
        # Use Python 3 Style
        thread = ThreadWithReturnValue3(
            target=sleep_and_kill,
            args=(manager.reader_children[0].pid, ),
            daemon=True)

    thread.start()

    # Stop the Master Process
    manager.stop()

    # Check if we had to kill the Reader Worker or it exited gracefully
    return_value = thread.join()
    if not return_value:
        raise Exception("Reader Worker failed to quit!")
Example #16
    def submit_event(self, data):
        m = Message()
        m.set_body(data)
        self.q.write(m)
Example #17
def sqs_release(jobid, attempts=0):
    queue = get_sqs_queue()
    m = Message()
    m.set_body('release ' + str(jobid) + ' ' + str(attempts))
    queue.write(m)
Example #18
def sqs_start(jobid, attempts=0):
    queue = get_sqs_queue()
    m = Message()
    m.set_body('start ' + str(jobid) + ' ' + str(attempts))
    queue.write(m)
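sqs_release and sqs_start encode a small space-delimited command protocol. A sketch of the consuming side (parse_job_message is an assumed name, not part of the original):

def parse_job_message(m):
    # Split 'start <jobid> <attempts>' / 'release <jobid> <attempts>'
    # bodies written by sqs_start and sqs_release.
    action, jobid, attempts = m.get_body().split()
    return action, jobid, int(attempts)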
Example #19
            if msgID == request['msg_id']:
                log_warning("REPLICATED msg_in: " + request['msg_id'])
                qout.write(msgIDList[msgID])
                duplicated_flag = 1
                break

        if duplicated_flag == 1: continue
        if request['method'] is None:
            log_fail("INVALID METHOD ID " + request['msg_id'])
            continue

        #print opnum of the request
        log_back("###### EXPECTED OP# : " + str(nextTopNum))
        log_back("!!!!!! REQUEST OP# : " + str(request['opnum']))

        msg_out = Message()  #creating empty message object for msg_out
        key = request['msg_id']  #assigning msg_id to key
        #print "line 413", key
        method = request['method']
        if method == constants.METHOD_POST_USER:
            rqresult = create_user(table, request)
            #rqresult['backend'] = args.suffix
            write_out_msg(rqresult)
            log_back("#####	created user    #####\n")

        elif method == constants.METHOD_GET_USER_BY_ID:
            rqresult = retrieve_by_id(table, request)
            #rqresult['backend'] = args.suffix
            write_out_msg(rqresult)
            log_back("#####	got user by id 	 #####\n")
Example #20
    def add(self, name, **spider_args):
        d = spider_args.copy()
        d['name'] = name
        msg = Message(body=json.dumps(d))
        return threads.deferToThread(self._queue_method, 'write', msg)
Example #21
    def send_message(self, queue_name=None, msg=None):
        q = self.conn.lookup(queue_name)
        m = Message()
        m.set_body(msg)
        q.write(m)
Example #22
    def on_data(self, data):
        msg = pickle.dumps(data)
        m = Message()
        m.set_body(msg)
        status = q[0].write(m)
        return True
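Because on_data pickles the payload before queueing it, a receiver has to unpickle the body; a minimal sketch, assuming the same global q list (and trusting the producer, since unpickling arbitrary data is unsafe):

def read_data():
    m = q[0].read()
    if m is not None:
        return pickle.loads(m.get_body())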
Example #23
def write_message_to_queue(queue):
    m = Message()
    m.set_body('request_ip')
    queue.write(m)
    print m
Example #24
def test_single_path_document_zip(registry_mock, connection_mock,
                                  document_mock, job_mock, parent_url_mock,
                                  get_document_mock):
    conn_s3 = boto.connect_s3()
    bucket = conn_s3.create_bucket('storage-bucket')
    conn_s3.create_bucket('files-bucket')

    registry = MagicMock()
    registry.get = MagicMock(side_effect=side_effect)

    registry_mock.return_value = registry

    message = Message()
    message.set_body(
        json.dumps({
            'Message': '9bd96ca7-3d0a-4e74-b523-b3bd38e9862e',
            'Subject': 'Test Subject'
        }))

    job = MagicMock(
        **{
            'uuid': '9bd96ca7-3d0a-4e74-b523-b3bd38e9862e',
            'name': 'Migration Download',
            'status': 'pending',
            'message': {
                'documents': [{
                    'parent_id':
                    '56d3c182-f72f-4216-9e94-1756bf67564d'
                }]
            }
        })
    job.set = MagicMock()

    document_mock.query.return_value = [
        MagicMock(
            **{
                'uuid': '56d3c182-f72f-4216-9e94-1756bf67564d',
                'created': datetime(2015, 5, 17),
                'url': '/'
            }),
        MagicMock(
            **{
                'uuid': '79254d0b-0902-4697-89d1-4be8ff3acd69',
                'created': datetime(2015, 5, 17),
                'url': 'test',
                'type': 'File'
            })
    ]

    key1 = Key(bucket, '17/5/2015/56d3c182-f72f-4216-9e94-1756bf67564d')
    key1.set_contents_from_string(
        json.dumps({
            'document': {
                'id': 1,
                'uuid': '56d3c182-f72f-4216-9e94-1756bf67564d',
                'created': str(datetime(2015, 5, 17)),
                'url': '/',
                'parent': 0,
                'path': '1/',
            }
        }))

    key2 = Key(bucket, '17/5/2015/79254d0b-0902-4697-89d1-4be8ff3acd69')
    key2.set_contents_from_string(
        json.dumps({
            'document': {
                'id': 2,
                'uuid': '79254d0b-0902-4697-89d1-4be8ff3acd69',
                'created': str(datetime(2015, 5, 17)),
                'url': 'test',
                'parent': 1,
                'path': '1/2',
                'type': 'File'
            },
            'file': {
                "bucket": "storage-bucket",
                'key': '17/5/2015/a984dea7-8140-44cb-80a0-7e832ff1ff19'
            }
        }))

    key3 = Key(bucket, '17/5/2015/a984dea7-8140-44cb-80a0-7e832ff1ff19')
    key3.set_contents_from_string('Hello World')

    job_mock.selectBy.return_value.getOne.return_value = job

    service = MigrationDownloadJob()

    get_document_mock.return_value = IN('uuid', [
        '56d3c182-f72f-4216-9e94-1756bf67564d',
        '79254d0b-0902-4697-89d1-4be8ff3acd69'
    ])

    def parent_side_effects(parent_id):
        return MagicMock(
            url='/',
            uuid='56d3c182-f72f-4216-9e94-1756bf67564d') if parent_id else None

    parent_url_mock.side_effect = parent_side_effects

    service.do_work(message)

    key = Key(bucket, '9bd96ca7-3d0a-4e74-b523-b3bd38e9862e')
    contents = StringIO(key.get_contents_as_string())
    handle = zipfile.ZipFile(contents, 'r', compression=zipfile.ZIP_DEFLATED)
    assert key.exists()
    assert handle.namelist() == [
        '56d3c182-f72f-4216-9e94-1756bf67564d',
        '79254d0b-0902-4697-89d1-4be8ff3acd69',
        '17/5/2015/a984dea7-8140-44cb-80a0-7e832ff1ff19', 'manifest'
    ]
    assert job.set.call_args_list == [
        call(status='running'),
        call(message={
            'documents': [{
                'parent_id': '56d3c182-f72f-4216-9e94-1756bf67564d'
            }],
            'download': {
                'key': '9bd96ca7-3d0a-4e74-b523-b3bd38e9862e',
                'bucket': 'storage-bucket'
            }
        },
             status='complete')
    ]
Example #25
parser.add_argument("qname")

args = parser.parse_args()

# "us-west-2a" is an availability zone name; connect_to_region expects
# a region name such as "us-west-2".
conn = boto.sqs.connect_to_region("us-west-2")

q = conn.get_queue(args.qname)

try:
    m = q.read(60)
    str1 = m.get_body()
    print "Message read = ", str1
except:
    print "Could not read message"
Example #26
def publish_to_sqs(data):
    m = Message()
    m.set_body(data)
    status = q[0].write(m)
    return status
Example #27
import boto.sqs
from boto.sqs.message import Message
import os
import config

conn = boto.sqs.connect_to_region("us-west-2",
                                  aws_access_key_id=config.sqs_access_key,
                                  aws_secret_access_key=config.sqs_access_secret)

queue = conn.get_queue("Image")
queuepkg = conn.get_queue("Package")


while True:
    messages = queue.get_messages()
    for mes in messages:
        dname = mes.get_body()
        print dname
        res = os.popen('./script.sh ' + dname)
        for i in range(4):
            res.readline()
        pkglist = res.read()
        # print pkglist
        # txt = open("file.txt", "r")
        # pkglist = txt.read()
        mes = Message()
        mes.set_body(pkglist)
        queuepkg.write(mes)
    if len(messages) != 0:
        queue.delete_message_batch(messages)
Example #28
def writeToSQS(messageBody):
    # Note that messages are base64 encoded.
    m1 = Message()
    m1.set_body(messageBody)
    q.write(m1)
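The base64 encoding noted in the comment is handled transparently by boto's default Message class: the body is encoded when written and decoded when read, so the consumer gets the original text back. A minimal round-trip sketch using the same global q:

m = q.read()
if m is not None:
    body = m.get_body()  # already base64-decoded by boto
    q.delete_message(m)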
Example #29
    def emit(self, record):
        m = Message()
        m.set_body(record.msg)
        self.q.write(m)
Example #30
def send(message):
    global q
    m = Message()
    m.set_body(message)
    q.write(m)
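This helper, like several snippets above, relies on a module-level q. A minimal sketch of the setup it assumes (region and queue name are placeholders):

import boto.sqs
from boto.sqs.message import Message

conn = boto.sqs.connect_to_region("us-west-2")
q = conn.get_queue("my-queue")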