示例#1
0
def start(queue, profile=None, tag='salt/engine/sqs'):
    '''
    Poll an SQS queue forever and fire each message on the Salt event bus.

    queue
        Name of the SQS queue to poll.
    profile
        Optional boto profile handed to ``_get_sqs_conn``.
    tag
        Event tag the messages are fired under.
    '''
    if __opts__.get('__role') == 'master':
        # listen=False: this engine only publishes events, so don't open a
        # subscriber socket (matches the other variants of this engine).
        fire_master = salt.utils.event.get_master_event(
            __opts__,
            __opts__['sock_dir'],
            listen=False).fire_event
    else:
        fire_master = None

    def fire(tag, msg):
        # Publish on the master bus when we have one, otherwise via the
        # minion's event.send execution module.
        if fire_master:
            fire_master(msg, tag)
        else:
            __salt__['event.send'](tag, msg)

    sqs = _get_sqs_conn(profile)
    q = sqs.get_queue(queue)

    while True:
        if not q:
            # get_queue() returns None when the queue is unreachable; back
            # off before retrying so we don't hammer the API.
            log.warning('failure connecting to queue: {0}, '
                        'waiting 10 seconds.'.format(queue))
            time.sleep(10)
            q = sqs.get_queue(queue)
            if not q:
                continue
        # Long-poll for up to 20 seconds per fetch.
        msgs = q.get_messages(wait_time_seconds=20)
        for msg in msgs:
            fire(tag, {'message': msg.get_body()})
            msg.delete()
示例#2
0
File: sqs_events.py  Project: DaveQB/salt
def start(queue, profile=None, tag='salt/engine/sqs'):
    '''
    Forward messages from an SQS queue onto the Salt event bus.
    '''
    fire_master = None
    if __opts__.get('__role') == 'master':
        event_bus = salt.utils.event.get_master_event(
            __opts__, __opts__['sock_dir'], listen=False)
        fire_master = event_bus.fire_event

    def fire(tag, msg):
        # Prefer the master event bus; fall back to the minion module.
        if fire_master:
            fire_master(msg, tag)
        else:
            __salt__['event.send'](tag, msg)

    conn = _get_sqs_conn(profile)
    q = conn.get_queue(queue)

    while True:
        if not q:
            log.warning('failure connecting to queue: {0}, '
                        'waiting 10 seconds.'.format(queue))
            time.sleep(10)
            q = conn.get_queue(queue)
            if not q:
                continue
        for msg in q.get_messages(wait_time_seconds=20):
            fire(tag, {'message': msg.get_body()})
            msg.delete()
def report_completion():
    """Tell the controller that the wrappers for this proxy are ready.

    Reads the wrapper location recorded by the build step, posts an
    "installers ready" notification to the controller's notify queue, and
    deletes the originating request message exactly once.  Flag files make
    both the delete and the overall report idempotent across re-runs.
    """
    # DRY warning: upload_wrappers.py.
    # Use context managers so handles are closed promptly; the original
    # `file(...)` calls leaked open file objects.
    with open('/home/lantern/wrapper_location') as f:
        installer_location = f.read()
    sqs = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    logging.info("Reporting installers for %s are ready at %s."
                 % (clip_email(USERID), installer_location))
    ctrl_req_q = sqs.get_queue("%s_request" % CONTROLLER)
    ctrl_notify_q = sqs.get_queue("notify_%s" % CONTROLLER)
    msg = JSONMessage()
    msg.set_body(
            {'fp-up-user': USERID,
             'fp-up-instance': INSTANCEID,
             'fp-up-insloc': installer_location,
             'fp-up-ip': IP,
             'fp-up-port': PORT,
             # TRANSITION: keep supporting old controllers for a while to make
             # deployment less timing sensitive.
             'invsrvup-user': USERID,
             'invsrvup-insloc': installer_location})
    ctrl_notify_q.write(msg)
    DEL_FLAG = '/home/lantern/deleted_sqs_message'
    if not os.path.exists(DEL_FLAG):
        to_delete = loads(b64decode(SQSMSG))
        ctrl_req_q.delete_message(to_delete)
        with open(DEL_FLAG, 'w') as f:
            f.write('OK')
    with open('/home/lantern/reported_completion', 'w') as f:
        f.write('OK')
示例#4
0
def trigger_launch():
    # Send a proxy-launch request to the controller's request queue.  The
    # account/token values here are placeholders (redacted).
    aws_id, aws_key = util.read_aws_credential()
    aws_creds = {'aws_access_key_id': aws_id, 'aws_secret_access_key': aws_key}
    sqs = boto.sqs.connect_to_region(config.aws_region, **aws_creds)
    req_q = sqs.get_queue("%s_request" % config.controller)
    notify_q = sqs.get_queue("notify_%s" % config.controller)
    for q in [req_q, notify_q]:
        q.set_message_class(JSONMessage)
    msg = JSONMessage()
    msg.set_body({
        'launch-fp-as': '*****@*****.**',
        'launch-refrtok': '<redacted>',
        'launch-serial': 1
    })
    print "Sending request..."
    req_q.write(msg)
    return  # Comment out to wait for response!
    # NOTE: everything below is intentionally unreachable; remove the
    # `return` above to poll the notify queue for the controller's reply.
    print "Awaiting response..."
    while True:
        msg = notify_q.read()
        if msg is not None:
            print "Got message: %r" % msg.get_body()
            notify_q.delete_message(msg)
            return
        sys.stdout.write(".")
        sys.stdout.flush()
示例#5
0
def start(queue, profile=None, tag='salt/engine/sqs'):
    '''
    Poll an SQS queue and fire each message on the Salt event bus.

    queue
        Name of the SQS queue to poll.
    profile
        Optional boto profile passed to ``_get_sqs_conn``.
    tag
        Event tag the messages are fired under.  When the
        ``sqs.message_format`` option is ``"json"`` the body (and any
        SNS-wrapped ``Message`` field) is decoded before firing.
    '''
    if __opts__.get('__role') == 'master':
        fire_master = salt.utils.event.get_master_event(
            __opts__,
            __opts__['sock_dir'],
            listen=False).fire_event
    else:
        fire_master = None

    message_format = __opts__.get('sqs.message_format', None)

    def fire(tag, msg):
        # Publish via the master bus when available, otherwise the minion.
        if fire_master:
            fire_master(msg, tag)
        else:
            __salt__['event.send'](tag, msg)

    sqs = _get_sqs_conn(profile)
    q = sqs.get_queue(queue)
    if q:
        # Guard: get_queue() returns None when the queue is unreachable.
        # The original called set_message_class() unconditionally and
        # crashed with AttributeError in that case.
        q.set_message_class(boto.sqs.message.RawMessage)

    rate_limiter = RateLimiter(5)
    while True:
        if not q:
            log.warning('failure connecting to queue: {0}, '
                        'waiting 10 seconds.'.format(queue))
            time.sleep(10)
            q = sqs.get_queue(queue)
            if not q:
                continue
            # Re-apply the message class after a successful reconnect; the
            # original skipped this, so reconnected queues decoded messages
            # with the default message class.
            q.set_message_class(boto.sqs.message.RawMessage)

        try:
            msgs = q.get_messages(wait_time_seconds=20)
        except TypeError:
            # Older versions of boto (such as 2.2.2 included with Ubuntu
            # 12.04) don't support long polling; fall back to a plain fetch.
            msgs = q.get_messages()
        for msg in msgs:
            if message_format == "json":
                sqsmessage = json.loads(msg.get_body())
                try:
                    # SNS-wrapped payloads carry JSON in the 'Message' field.
                    sqsmessage['Message'] = json.loads(sqsmessage['Message'])
                except ValueError:
                    pass
                fire(tag, {'message': sqsmessage})
            else:
                fire(tag, {'message': msg.get_body()})
            msg.delete()
        # The rate limiter keeps the short-polling fallback from pounding
        # AWS.  (The original spelled this as a no-break for/else, whose
        # else clause runs unconditionally — same effect, clearer here.)
        rate_limiter.acquire()
示例#6
0
def start(queue, profile=None, tag="salt/engine/sqs", owner_acct_id=None):
    """
    Listen to sqs and fire message on event bus
    """
    if __opts__.get("__role") == "master":
        fire_master = salt.utils.event.get_master_event(
            __opts__, __opts__["sock_dir"], listen=False
        ).fire_event
    else:
        fire_master = __salt__["event.send"]

    message_format = __opts__.get("sqs.message_format", None)

    sqs = _get_sqs_conn(profile)
    q = None
    while True:
        if not q:
            q = sqs.get_queue(queue, owner_acct_id=owner_acct_id)
            q.set_message_class(boto.sqs.message.RawMessage)

        _process_queue(
            q,
            queue,
            fire_master,
            tag=tag,
            owner_acct_id=owner_acct_id,
            message_format=message_format,
        )
示例#7
0
def actually_check_q():
    """Read one message from the controller request queue and dispatch it."""
    logging.info("Checking queue...")
    sqs = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    ctrl_req_q = sqs.get_queue("%s_request" % CONTROLLER)
    ctrl_req_q.set_message_class(JSONMessage)
    msg = ctrl_req_q.read()
    if msg is None:
        logging.info("Nothing in request queue.")
        return
    d = msg.get_body()
    # DRY warning: FallbackProxyLauncher at lantern-controller.
    # TRANSITION: support old controllers for a while to make deployment less
    # time sensitive.
    userid = d.get('launch-fp-as', d.get('launch-invsrv-as'))
    if userid:
        # Backwards compatibility: we'll be getting serial numbers starting from 1
        # in the new fallback balancing scheme.  Just in case we get a new proxy
        # launch request from an old controller, let's mark it as 0.
        # NOTE(review): the message is passed to launch_proxy, which is
        # presumably responsible for deleting it — confirm.
        launch_proxy(userid, d.get('launch-serial', 0), d['launch-refrtok'], msg)
    elif 'shutdown-fp' in d:
        instance_id = d['shutdown-fp']
        logging.info("Got shutdown request for %s" % instance_id)
        nproxies = shutdown_proxy(instance_id)
        if nproxies != 1:
            logging.error("Expected one proxy shut down, got %s" % nproxies)
        ctrl_req_q.delete_message(msg)
    else:
        # NOTE(review): unrecognized messages are logged but NOT deleted, so
        # they will reappear after the visibility timeout — confirm intended.
        logging.error("I don't understand this message: %s" % d)
示例#8
0
def start(queue, profile=None, tag='salt/engine/sqs', owner_acct_id=None):
    '''
    Poll an SQS queue forever and relay each message onto the event bus.
    '''
    if __opts__.get('__role') == 'master':
        event_bus = salt.utils.event.get_master_event(
            __opts__, __opts__['sock_dir'], listen=False)
        fire_master = event_bus.fire_event
    else:
        fire_master = __salt__['event.send']

    message_format = __opts__.get('sqs.message_format', None)

    conn = _get_sqs_conn(profile)
    q = None
    while True:
        if not q:
            # (Re-)attach to the queue with raw message decoding.
            q = conn.get_queue(queue, owner_acct_id=owner_acct_id)
            q.set_message_class(boto.sqs.message.RawMessage)

        _process_queue(q, queue, fire_master, tag=tag,
                       owner_acct_id=owner_acct_id,
                       message_format=message_format)
示例#9
0
def to_queue(obj, queue_name):
    """Serialize `obj` as JSON and enqueue it; local runs only log."""
    if environment.local:
        print('[queue] {}'.format(queue_name))
        return
    target = sqs.get_queue(queue_name)
    payload = RawMessage()
    payload.set_body(json.dumps(obj))
    target.write(payload)
 def __init__(self, queue):
     """Daemon worker bound to a local queue and the remote SQS queue."""
     threading.Thread.__init__(self)
     self.daemon = True
     self._queue = queue
     conn = boto.sqs.connect_to_region(
         Constants.aws_region,
         aws_access_key_id=Constants.aws_access_key,
         aws_secret_access_key=Constants.aws_secret_key)
     # Remote messages are treated as raw (no base64 decoding).
     remote = conn.get_queue(Constants.aws_sqs_queue_name)
     remote.set_message_class(RawMessage)
     self._remote_queue = remote
示例#11
0
def report_error_to_controller(error):
    """Send an alarm for this instance to the controller's notify queue."""
    conn = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    notify_queue = conn.get_queue("notify_%s" % CONTROLLER)
    # DRY: SQSChecker at lantern-controller.
    body = {'fp-alarm': error,
            'instance-id': INSTANCEID,
            'ip': IP,
            'port': PORT,
            'send-email': True}
    alarm = JSONMessage()
    alarm.set_body(body)
    notify_queue.write(alarm)
示例#12
0
    def get_sqs_queue(self, region_name, queue_name):
        """Return the named SQS queue, memoized per (region, queue) pair."""
        cache_key = (region_name, queue_name)
        cached = self._queues.get(cache_key)
        if cached is not None:
            return cached

        conn = self.connect_to('sqs', region_name)
        found = conn.get_queue(queue_name)
        if not found:
            raise RuntimeError('no such queue {} in {}'.format(repr(queue_name), str(region_name)))
        self._queues[cache_key] = found
        return found
示例#13
0
File: aws.py  Project: tp-tc/shipit
    def get_sqs_queue(self, region_name, queue_name):
        """Look up an SQS queue, caching the result per (region, name)."""
        cache_key = (region_name, queue_name)
        try:
            return self._queues[cache_key]
        except KeyError:
            pass

        conn = self.connect_to("sqs", region_name)
        found = conn.get_queue(queue_name)
        if not found:
            raise RuntimeError(f"no such queue {repr(queue_name)} in {str(region_name)}")
        self._queues[cache_key] = found
        return found
示例#14
0
def send_message(d):
    aws_id, aws_key = util.read_aws_credential()
    aws_creds = {'aws_access_key_id': aws_id, 'aws_secret_access_key': aws_key}
    sqs = boto.sqs.connect_to_region(config.aws_region, **aws_creds)
    req_q = sqs.get_queue("%s_request" % config.controller)
    req_q.set_message_class(JSONMessage)
    msg = JSONMessage()
    msg.set_body(d)
    print "Sending request..."
    req_q.write(msg)
    print "Sent."
def send_message(d):
    aws_id, aws_key = util.read_aws_credential()
    aws_creds = {'aws_access_key_id': aws_id,
                 'aws_secret_access_key': aws_key}
    sqs = boto.sqs.connect_to_region(config.aws_region, **aws_creds)
    req_q = sqs.get_queue("%s_request" % config.controller)
    req_q.set_message_class(JSONMessage)
    msg = JSONMessage()
    msg.set_body(d)
    print "Sending request..."
    req_q.write(msg)
    print "Sent."
示例#16
0
def get_sqs_queue(sqs_name):
    """Connect to SQS and return ``(connection, queue)`` for `sqs_name`.

    Re-raises the original exception if the connection cannot be made;
    the previous version printed the error, fell through, and then
    crashed with a NameError because ``sqs`` was never bound.
    """
    try:
        sqs = boto.sqs.connect_to_region(aws_region,
                                         aws_access_key_id=aws_id,
                                         aws_secret_access_key=aws_key)
    except Exception as e:
        print('Could not connect to SQS')
        print(e)
        raise

    print('Connected to AWS SQS: ' + str(sqs))
    return sqs, sqs.get_queue(sqs_name)
示例#17
0
File: aws.py  Project: tp-tc/tooltool
    def get_sqs_queue(self, region_name, queue_name):
        """Return the named queue in `region_name`, caching lookups."""
        key = (region_name, queue_name)
        if key not in self._queues:
            sqs = self.connect_to('sqs', region_name)
            queue = sqs.get_queue(queue_name)
            if not queue:
                raise RuntimeError('no such queue %r in %s' %
                                   (queue_name, region_name))
            self._queues[key] = queue
        return self._queues[key]
示例#18
0
    def get_sqs_queue(self, region_name, queue_name):
        """Fetch an SQS queue, memoizing by (region, queue) pair."""
        memo_key = (region_name, queue_name)
        cached = self._queues.get(memo_key)
        if cached is not None:
            return cached
        connection = self.connect_to('sqs', region_name)
        result = connection.get_queue(queue_name)
        if not result:
            raise RuntimeError('no such queue %r in %s' %
                               (queue_name, region_name))
        self._queues[memo_key] = result
        return result
示例#19
0
def notify_sqs(args, zonename):
    """Send a hand-crafted termination notification to the autoscaling queue."""
    sqs = boto.sqs.connect_to_region(args.region)

    instance = args.name + "." + zonename
    sqs_queue = "autoscaling"
    # Mimics an SNS->SQS notification envelope; the inner "Message" field is
    # itself a JSON string, hence the doubled backslash escapes.
    manual_termination = '{"Type" : "Notification", "Subject" : "Manual: termination for instance ' + instance + '", "Message" : "{\\"Event\\":\\"manual:EC2_INSTANCE_TERMINATE\\",\\"EC2InstanceId\\":\\"' + instance + '\\"}"}'

    q = sqs.get_queue(sqs_queue)
    # RawMessage: write the body verbatim, no base64 encoding.
    q.set_message_class(RawMessage)
    m = RawMessage()
    m.set_body(manual_termination)
    q.write(m)
    print "SQS: Termination event sent for %s." % args.name
def main(config=Config()):
    """Poll the configured SQS queue forever and process each tile message.

    NOTE(review): the default ``Config()`` is evaluated once at definition
    time, so all no-argument calls share a single Config instance — confirm
    that is intended.
    """
    try:
        # ARN layout: arn:aws:sqs:<region>:<account>:<queue-name>
        _, _, _, sqs_region, _, sqs_queue = config.sqs_arn.split(':', 5)
    except Exception:
        raise Exception('invalid sqs arn')

    sqs = boto.sqs.connect_to_region(sqs_region)
    queue = sqs.get_queue(sqs_queue)

    if queue is None:
        raise Exception('could not connect to sqs queue: ' + config.sqs_arn)

    while True:
        # Long-poll for up to 20 seconds per read.
        message = queue.read(wait_time_seconds=20)
        if message is None:
            #out('Queue is empty')
            continue
        try:
            raw_body = message.get_body()
            out('Message received')
        except Exception:
            msg = 'Failed to get message body'
            out(msg)
            statsd.event('SQS Message Error', msg, alert_type='error')
            statsd.increment('tiles.processor.failed_get_body')
            continue
        try:
            # The SNS 'Message' field apparently arrives as a Python-repr'd
            # payload; the quote replacements coerce it into parseable JSON.
            body = json.loads(
                json.loads(raw_body)['Message'].replace("u'",
                                                        '"').replace("'", '"'))
        except Exception:
            msg = 'Invalid message body: ' + raw_body
            out(msg)
            statsd.event('JSON parse error', msg, alert_type='error')
            statsd.increment('tiles.processor.invalid_body')
            continue
        try:
            out('Processing: ' + str(body))
            process(message, body, config.ddfs_master, config.tag_prefix)
            statsd.increment('tiles.processor.processed')
        except Exception:
            msg = 'Failed to process: ' + str(body)
            out(msg)
            statsd.increment('tiles.processor.failed_to_process')
            statsd.event('Failed Processing message', msg, alert_type='error')
            # NOTE(review): failed messages are not deleted here; presumably
            # process()/stoppable() own message lifecycle — confirm.
            stoppable()
示例#21
0
def get_keys_to_verify():
    sqs = boto.sqs.connect_to_region("us-west-2")
    queue = sqs.get_queue("spade-compactor-integration")
    queue.set_message_class(boto.sqs.message.RawMessage)
    while True:
        msgs = queue.get_messages(visibility_timeout=30)
        if not msgs:
            return
        msg = msgs[0]
        try:
            upload = json.loads(msg.get_body())
            if "keyname" in upload and "tablename" in upload:
                queue.delete_message(msg)
                yield (upload["keyname"], upload["tablename"])
        except Exception as e:
            print(e)
            print("bad message: " + msg.get_body())
            return
示例#22
0
def get_keys_to_verify():
    sqs = boto.sqs.connect_to_region("us-west-2")
    queue = sqs.get_queue("spade-compactor-integration")
    queue.set_message_class(boto.sqs.message.RawMessage)
    while True:
        msgs = queue.get_messages(visibility_timeout=30)
        if not msgs:
            return
        msg = msgs[0]
        try:
            upload = json.loads(msg.get_body())
            if "keyname" in upload and "tablename" in upload:
                queue.delete_message(msg)
                yield (upload["keyname"], upload["tablename"])
        except Exception as e:
            print(e)
            print("bad message: " + msg.get_body())
            return
示例#23
0
def test_accuracy(orig_dir, test_dir, sqs_queue, s3_bucket, num_tests):
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)
    s3 = boto.connect_s3()
    sqs = boto.connect_sqs()
    bucket = s3.get_bucket(s3_bucket)
    my_queue = sqs.get_queue(sqs_queue)
    while num_tests > 0:
        num_tests -= 1
        for m in my_queue.get_messages(num_messages=1):
            res = json.loads(m.get_body())
            my_queue.delete_message(m)
        k = bucket.get_key(res["f_name"])
        result_file = os.path.join(test_dir, res["f_name"])
        k.get_contents_to_filename(result_file)
        rms = np.load(result_file)
        test = compare_serial_rms(orig_dir, test_dir, res["file_id"], rms)
        print "Test", res["file_id"], "passed" if test else "failed"
def report(failures):
    """Alert the controller that the given fallbacks failed to proxy."""
    fps_str = '\n' + '\n'.join(sorted(failures))
    log.warn("Fallbacks failed to proxy: %s" % fps_str)
    conn = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    notify_queue = conn.get_queue("notify_%s" % CONTROLLER)
    alarm = JSONMessage()
    alarm.set_body({
        'fp-alarm': "Fallbacks not proxying",
        'subject': "ALARM: fallback(s) failing to proxy",
        'send-email': True,
        'ip': fps_str,
        # These fields are expected by the controller, but they
        # make no sense in this case.
        'user': "******",
        'instance-id': 'unavailable',
        'port': "n/a"
    })
    notify_queue.write(alarm)
示例#25
0
 def post(self):
     """Queue up to 500 un-ported top scorers and mark them as ported."""
     users_future = (UserScore.query(UserScore.ported == False)
                              .order(-UserScore.score)
                              .fetch_async(500))
     # Include secrets as a submodule if anything grows out of this.
     aws_creds = {'aws_access_key_id': '<REDACTED>',
                  'aws_secret_access_key': '<REDACTED>'}
     conn = boto.sqs.connect_to_region('ap-southeast-1', **aws_creds)
     notify_queue = conn.get_queue("notify_lanternctrl1-2")
     notify_queue.set_message_class(JSONMessage)
     users = list(users_future.get_result())
     request = JSONMessage()
     request.set_body({'port-users': '\n'.join(u.key.id() for u in users)})
     notify_queue.write(request)
     logging.info("Sent request.")
     for user in users:
         user.ported = True
     ndb.put_multi(users)
     logging.info("Marked users as ported.")
示例#26
0
def actually_check_q():
    """Read one controller request message and dispatch it by command key."""
    log.info("Checking queue...")
    sqs = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    ctrl_req_q = sqs.get_queue("%s_request" % CONTROLLER)
    ctrl_req_q.set_message_class(JSONMessage)
    msg = ctrl_req_q.read()
    if msg is None:
        log.info("Nothing in request queue.")
        return
    d = deunicodify(msg.get_body())
    # Deleted before handling, so a failure below will not re-deliver the
    # message.
    ctrl_req_q.delete_message(msg)
    # DRY warning: FallbackProxyLauncher at lantern-controller.
    if 'launch-fp' in d:
        name = d['launch-fp']
        # Salt scripts consuming these should use backwards-compatible
        # defaults.
        pillars = d.get('launch-pillars', {})
        # Default proxy_protocol to tcp
        pillars.setdefault('proxy_protocol', 'tcp')
        # Make new fallbacks install from git by default.  We can't do this in
        # the fallback Salt config because there the defaults need to be
        # backwards compatible with old-style fallbacks.  We can't upgrade
        # those until we EOL old clients, since the new style of fallback
        # requires an auth token, that old fallbacks don't know to provide.
        pillars.setdefault('install-from', 'git')
        if 'auth_token' not in pillars:
            pillars['auth_token'] = random_auth_token()
        launch_fp(name, d, pillars)
    elif 'shutdown-fp' in d:
        shutdown_one(d['shutdown-fp'])
    elif 'shutdown-fl' in d:
        shutdown_one(d['shutdown-fl'])
    elif 'launch-fl' in d:
        launch('fl', d)
    elif 'launch-wd' in d:
        launch('wd', d)
    elif 'launch-ps' in d:
        launch('ps', d)
    elif 'launch-au' in d:
        launch('au', d)
    else:
        log.error("I don't understand this message: %s" % d)
示例#27
0
def listen(profile, queue, command):
    """Poll an SQS queue and pipe each message body to `command` via stdin.

    The message is deleted only when the command produced no stderr output;
    otherwise it is left on the queue to be retried.

    profile
        AWS CLI / boto profile name (looked up as ``[profile <name>]``).
    queue
        Name of the SQS queue to poll.
    command
        Shell command executed once per message.
    """
    import sys
    import os
    import subprocess
    import ConfigParser
    import boto.sqs

    # while Boto can read its own configuration just fine, we want Antenna
    # to also work with AWS CLI configuration file
    profile = 'profile ' + profile
    config = ConfigParser.ConfigParser()
    config.read(map(os.path.expanduser, ['~/.aws/config', '~/.boto']))
    region = config.get(profile, 'region')
    access_key = config.get(profile, 'aws_access_key_id')
    secret_key = config.get(profile, 'aws_secret_access_key')

    sqs = boto.sqs.connect_to_region(region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)

    queue = sqs.get_queue(queue)
    if not queue:
        raise ValueError("Queue does not exist.")

    while True:
        messages = queue.get_messages(1, wait_time_seconds=20)

        if len(messages):
            message = messages[0]
            body = message.get_body()
            # Capture the child's output: without PIPEs, communicate()
            # always returned (None, None), so the stderr check below never
            # fired and every message was deleted regardless of errors.
            process = subprocess.Popen([command],
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       shell=True)
            stdout, stderr = process.communicate(body)

            if stdout:
                sys.stdout.write(stdout)

            if stderr:
                sys.stderr.write(stderr)
            else:
                # Success (no stderr): acknowledge the message.
                queue.delete_message(message)
示例#28
0
File: sqs_events.py  Project: bryson/salt
def start(queue, profile=None, tag='salt/engine/sqs', owner_acct_id=None):
    '''
    Relay SQS messages onto the Salt event bus, reconnecting as needed.
    '''
    if __opts__.get('__role') == 'master':
        master_event = salt.utils.event.get_master_event(
            __opts__, __opts__['sock_dir'], listen=False)
        fire_master = master_event.fire_event
    else:
        fire_master = __salt__['event.send']

    message_format = __opts__.get('sqs.message_format', None)

    conn = _get_sqs_conn(profile)
    q = None
    while True:
        if not q:
            q = conn.get_queue(queue, owner_acct_id=owner_acct_id)
        _process_queue(q, queue, fire_master, tag=tag,
                       owner_acct_id=owner_acct_id,
                       message_format=message_format)
示例#29
0
def submit_task():
    """Submit argv[1:] as a task on the slurm request queue; print its id."""
    queue_id = environ.get("SLURM_EC2_QUEUE_ID")
    if queue_id is None:
        print("SLURM_EC2_QUEUE_ID environment variable not set", file=stderr)
        return 1

    # Random 20-hex-char task identifier.
    task_id = "task-%s" % "".join(["%02x" % ord(x) for x in urandom(10)])

    request_queue = get_sqs().get_queue("slurm-%s-request" % queue_id)
    payload = json_dumps({
        "id": task_id,
        "cmd": argv[1:],
        "env": dict(environ),
    })
    request_queue.write(request_queue.new_message(payload))
    print(task_id)
    return 0
def actually_check_q():
    """Read one message from the controller request queue and dispatch it."""
    logging.info("Checking queue...")
    sqs = boto.sqs.connect_to_region(AWS_REGION, **aws_creds)
    ctrl_req_q = sqs.get_queue("%s_request" % CONTROLLER)
    ctrl_req_q.set_message_class(JSONMessage)
    msg = ctrl_req_q.read()
    if msg is None:
        logging.info("Nothing in request queue.")
        return
    d = msg.get_body()
    # DRY warning: FallbackProxyLauncher at lantern-controller.
    # TRANSITION: support old controllers for a while to make deployment less
    # time sensitive.
    userid = d.get('launch-fp-as', d.get('launch-invsrv-as'))
    if userid:
        # Lantern won't start without *some* refresh token.  If we don't get one
        # from the controller let's just make up a bogus one.
        refresh_token = d.get('launch-refrtok', '').strip() or 'bogus'
        # Backwards compatibility: we'll be getting serial numbers starting
        # from 1 in the new fallback balancing scheme.  Just in case we get
        # a new proxy launch request from an old controller, let's mark it as
        # 0.
        serial = d.get('launch-serial', 0)
        # Salt scripts consuming these should use backwards-compatible defaults.
        pillars = d.get('launch-pillars', {})
        # Default proxy_protocol to tcp
        pillars['proxy_protocol'] = pillars.get('proxy_protocol', 'tcp')
        # NOTE(review): the message is handed to launch_proxy, presumably so
        # it is deleted only on successful launch — confirm.
        launch_proxy(userid,
                     serial,
                     refresh_token,
                     msg,
                     pillars)
    elif 'shutdown-fp' in d:
        instance_id = d['shutdown-fp']
        logging.info("Got shutdown request for %s" % instance_id)
        nproxies = shutdown_proxy(instance_id)
        if nproxies != 1:
            logging.error("Expected one proxy shut down, got %s" % nproxies)
        ctrl_req_q.delete_message(msg)
    else:
        # NOTE(review): unrecognized messages are not deleted and will
        # reappear after the visibility timeout — confirm intended.
        logging.error("I don't understand this message: %s" % d)
示例#31
0
def send_annotation_request():
    """Bottle handler: record an annotation job and enqueue it for processing.

    Parses the S3 redirect parameters (``bucket`` and ``key``), persists a
    pending job record to DynamoDB, then posts the job data to the
    ``lyc-job-requests`` SQS queue.  Returns a small JSON status body.
    """

    # Get bucket name and key from the S3 redirect URL
    bucket_name = bottle.request.query.bucket
    key = bottle.request.query.key.split('/')[1] # jobID~test.txt
    jobID = key.split('~')[0]
    file_name = key.split('~')[1]

    # Create a job item and persist it to the annotations database
    ann_table = Table('lyc-annotations', schema=[HashKey('job_id')], connection = ddb.connect_to_region('us-east-1'))
    data = {'job_id': jobID, 'username': '******', 's3_inputs_bucket': bucket_name, 's3_key_input_file': 'lyc/'+key, 'input_file_name': file_name, 'submit_time': int(time.time()), 'status':'pending'}
    ann_table.put_item(data=data)


    ###------------------------------------------------------------------###
    ## Create new request that includes the same data in the body
    # url ="http://ec2-52-2-66-81.compute-1.amazonaws.com:8888/annotator/analysis"
    # headers = {'Content-Type': 'application/json'}
    # ann_request = urllib2.Request(url, json.dumps(data), headers)

    ## Send request (as an HTTP POST) to the annotator API
    # annotator_response = urllib2.urlopen(ann_request)

    ## returns a response to the user containing the job ID and the filename
    # return annotator_response
    ###------------------------------------------------------------------###


    # publish a notification to the SNS topic
    # http://ridingpython.blogspot.com/2011/11/aws-sns-how-to-send-out-messages-to-e.html
    queue = sqs.get_queue('lyc-job-requests')

    # publishes a notification to the SNS topic
    m = RawMessage()
    m.set_body(json.dumps(data))
    queue.write(m)

    # returns a response to the user containing the job ID and the filename
    response = '{"code": "200 OK", "data": {"id": "%s", "input_file": "%s"}}' % (jobID, key)
    return response
示例#32
0
def annotate_loop(sqs):
    """Poll the job queue forever, submitting each annotation request."""
    q = sqs.get_queue(queue_name)
    while 1:
        # Get only one message at a time. We only want to process
        # one at a time
        msg = q.get_messages(1)
        if msg:
            print msg
            # SNS envelope: the actual request lives in the 'Message' field.
            sreq = json.loads(msg[0].get_body())["Message"]
            print sreq
            if not sreq :
                continue
            #data = json.loads(sreq)
            # Payload is a Python-literal dict, not strict JSON.
            data  = ast.literal_eval(sreq)
            print "Received new request: {0}".format(data)
            src_bucket  =  data.get('s3_inputs_bucket')
            key         =  data.get('s3_key_input_file')
            dest_bucket =  'gas-results'
            status      =  submit_job(src_bucket, key, dest_bucket);
            q.delete_message(msg[0])
        print "Sleeping ."
        time.sleep(10)

    # NOTE(review): unreachable — the while loop above never breaks.
    return
示例#33
0
}

def comment(url, text, sha=None):
    """Post a GitHub comment."""
    payload = {"body": text}
    if sha:
        payload["sha"] = sha
    requests.post(url, headers=github_headers, data=json.dumps(payload))

# SQS
# Module-level AWS clients, configured from the loaded config dict and
# shared by the handlers below.

sqs = boto.sqs.connect_to_region(config["sqs"]["region"],
                                 aws_access_key_id=config["sqs"]["access"],
                                 aws_secret_access_key=config["sqs"]["secret"])

queue = sqs.get_queue(config["sqs"]["queue"])

# S3

s3 = boto.connect_s3(aws_access_key_id=config["s3"]["access"],
                     aws_secret_access_key=config["s3"]["secret"])
s3_bucket = s3.get_bucket(config["s3"]["bucket"])

def publish(bucket, prefix, site):
    """Publish jekyll output to S3."""
    chop = len(site) + 1
    for entry in os.walk(site):
        for filename in entry[2]:
            src_name = (entry[0] + "/" + filename)
            dst_name = src_name[chop:]
            key = bucket.new_key(prefix + dst_name)
示例#34
0
# Script: read one SNS-wrapped message from 'cat_queue' and decode its body.
# Usage: <script> <source-bucket> <target-bucket>
s3 = boto.connect_s3()
src_bucket_name = sys.argv[1]
target_bucket_name = sys.argv[2]

if not s3.lookup(target_bucket_name):
  print "Target Bucket not found ... Creating"
  # NOTE(review): despite the message, nothing is created here; the
  # following get_bucket will fail if the bucket is missing — confirm.

target_bucket_name = s3.get_bucket(target_bucket_name)

src_bucket_name = s3.get_bucket(src_bucket_name)
if not src_bucket_name:
  print "Source Bucket not found ... Exiting"
  sys.exit(1)

sqs = boto.sqs.connect_to_region("us-east-1")
devops = sqs.get_queue('cat_queue')

from boto.sqs.message import RawMessage
devops.set_message_class(RawMessage)

result = devops.get_messages()

if len(result) == 0:
  print "No messages available... Exiting"
  sys.exit(1)

#read the message body
m = result[0]
body = m.get_body()
# The SQS body is an SNS envelope; the payload is the nested 'Message' JSON.
decoded_message = json.loads(body)
decoded_message = json.loads(decoded_message["Message"])
示例#35
0
        argv = FLAGS(argv)[1:]
    except gflags.FlagsError, e:
        stderr.write("%s\\nUsage: %s update_id_addresses\\n%s\n" %
                     (e, sys.argv[0], FLAGS))
        return 1

    sqs = boto.sqs.connect_to_region(FLAGS.region)
    targets = map(sqs.create_queue, FLAGS.targets)
    if FLAGS.domain is not None:
        simpledb = boto.sdb.connect_to_region(FLAGS.region)
        domain = simpledb.create_domain(FLAGS.domain)
    else:
        domain = None
    bucket = boto.s3.connect_to_region(FLAGS.region).get_bucket(FLAGS.bucket)

    q = sqs.get_queue(FLAGS.source)
    messages = q.get_messages()
    batch = {}
    delete = []
    while messages:
        for incoming in messages:
            message = json.loads(incoming.get_body())
            print message['id']
            batch[message['id']] = {'outside': 'true'}
            delete.append(incoming)
            if len(batch) >= 25:
                domain.batch_put_attributes(batch)
                for deleted in delete:
                    for target in targets:
                        target.write(deleted)
                    q.delete_message(deleted)
示例#36
0
# Script: read one SNS-wrapped message from 'cat_queue' and decode its body.
# Usage: <script> <source-bucket> <target-bucket>
s3 = boto.connect_s3()
src_bucket_name = sys.argv[1]
target_bucket_name = sys.argv[2]

if not s3.lookup(target_bucket_name):
    print "Target Bucket not found ... Creating"
    # NOTE(review): despite the message, nothing is created here; the
    # following get_bucket will fail if the bucket is missing — confirm.

target_bucket_name = s3.get_bucket(target_bucket_name)

src_bucket_name = s3.get_bucket(src_bucket_name)
if not src_bucket_name:
    print "Source Bucket not found ... Exiting"
    sys.exit(1)

sqs = boto.sqs.connect_to_region("us-east-1")
devops = sqs.get_queue('cat_queue')

from boto.sqs.message import RawMessage
devops.set_message_class(RawMessage)

result = devops.get_messages()

if len(result) == 0:
    print "No messages available... Exiting"
    sys.exit(1)

#read the message body
m = result[0]
body = m.get_body()
# The SQS body is an SNS envelope; the payload is the nested 'Message' JSON.
decoded_message = json.loads(body)
decoded_message = json.loads(decoded_message["Message"])
示例#37
0
    def __init__(self, article, uuid):
        # article: a parsed article object providing .title, .cleaned_text
        # and .top_image.src (see summary()/to_json() below).
        # uuid: correlation id carried through the message pipeline.
        self.article = article
        self.uuid = uuid

    def summary(self):
        """Return the first three sentences of the article's cleaned text."""
        # NLTK's pre-trained Punkt model splits the text into sentences.
        tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
        lines = tokenizer.tokenize(self.article.cleaned_text)
        return " ".join(lines[:3])

    def to_json(self):
        """Serialize the article (title, top image URL, uuid, short summary)
        to a JSON string."""
        payload = {}
        payload["title"] = self.article.title
        payload["image"] = self.article.top_image.src
        payload["uuid"] = self.uuid
        payload["text"] = self.summary()
        return json.dumps(payload)


if __name__ == '__main__':
    # Worker: pull one URL message from the "thrumlinkparser" queue, extract
    # the article with Goose, and push the serialized result onto the
    # "thrumgifcreator" queue.
    sqs = boto.sqs.connect_to_region("eu-west-1")
    inq = sqs.get_queue("thrumlinkparser")
    message = inq.read()
    if message is not None:
        data = json.loads(message.get_body())
        g = Goose()
        article = g.extract(url=data["url"])
        am = ArticleMessage(article, data["uuid"])
        outq = sqs.get_queue("thrumgifcreator")
        outmessage = outq.new_message(body=am.to_json())
        outq.write(outmessage)
        #print  article.cleaned_text, article.top_image.src, article.title
        # NOTE(review): re-fetching the input queue here looks redundant --
        # the `inq` from above could be reused; verify before changing.
        inq = sqs.get_queue("thrumlinkparser")
        inq.delete_message(message)
    
示例#38
0
def writeMessage(sqs, queueName, jsonDump):
    myQueue = sqs.get_queue(queueName)
    sqs.send_message(myQueue, jsonDump)
    print 'Write To Queue Complete' + '\n'
def fetchsamples():
  """Stream tweets matching 'football' from the Twitter filter API, store
  geotagged ones in DynamoDB (module-global table ``tt``) and batch their
  ids onto the 'assi2_queue' SQS queue.

  NOTE(review): relies on module globals table_name, twitterreq and tt.
  The tab/space indentation around the sqs_counter branch is inconsistent
  and the trailing ''' opens an unterminated string literal -- both look
  like paste artifacts and are left byte-identical here.
  """

  count=0
  url="https://stream.twitter.com/1.1/statuses/filter.json?track=football&language=en"
  parameters = []
  
  sqs_counter=0
  sqs_msg=table_name+"\n"
  sqs = boto.connect_sqs()
  queue = sqs.get_queue('assi2_queue')
  m = Message()
  sqs_msg_counter=1

  #for i in range(2):
  #  print i 
  response = twitterreq(url, "GET", parameters)
  for line in response:
    f = open(table_name+".txt", "a+")
    #print line.strip()
      #f.write(line.strip()+"\n")
      #print line.strip()
      #print "_______________________"
    #print "aaaaa"+line.strip()
    if(line.strip()!=""): 
      j=dict()  
      t=json.loads(line.strip())
      #print t
      # Only tweets with explicit coordinates are persisted.
      if t['coordinates']!=None:
        #j['location']=" "
        #print t
        #print t['coordinates']
        j['coordinates']=str(t['coordinates']['coordinates'][0])+","+str(t['coordinates']['coordinates'][1])
        #print t['coordinates']['coordinates'][1]
        j['tweet_id']=t['id_str']
        #text = " ".join([stemmer.stem(re.sub(r'[\W_]',"",kw)) for kw in t['text'].split(" ")])
        j['text']=t['text']
        j['created_at']=str(t['created_at'])
        j['follower_count']=str(t['user']['followers_count'])
        tt.put_item(data=j,overwrite=True)
        f.write(str(t['id_str'])+"\n")
        #tt.put_item(data=j,overwrite=True)
        count+=1

	if(sqs_counter==2): ## Number of messaged to send 
          m.set_body(sqs_msg)
          queue.write(m)
          print "Message sent to queue :" + str(sqs_msg_counter)
          sqs_msg_counter+=1
          sqs_msg=table_name+"\n"
          sqs_counter=0

	  sqs_msg+=t['id_str']+"\n"
	  sqs_counter+=1
		
        else:
          sqs_msg+=t['id_str']+"\n"
          sqs_counter+=1
	

	print count 
        if count==400:
          exit()
        #print count
        print "Tweet Entered __________________________"
        f.close() 
    '''  
示例#40
0
# EC2 worker skeleton: poll the 'requests' SQS queue for jobs.
# NOTE(review): this snippet is cut off mid-loop (job handling is missing)
# and the idle-shutdown logic is commented out.
import sys, os, time, boto, boto.ec2, boto.sqs, boto.utils
from boto.sqs.message import Message
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table

sqs = boto.sqs.connect_to_region("us-west-2")
requests = sqs.get_queue('requests')
responses = sqs.get_queue('responses')

# DynamoDB table keyed on the message text.
messages = Table('messages', schema=[HashKey('message')])

# Identity of this worker instance, for the (commented-out) self-terminate.
inst_id = boto.utils.get_instance_metadata()['instance-id']
ec2 = boto.ec2.connect_to_region("us-west-2")


# idle: allowed idleness threshold from argv[2]; check counts empty polls.
# TODO confirm the units against the commented shutdown code below.
idle = int(sys.argv[2])
check = 0


#should be set to kill itself after a period of time
while True:
	rs = requests.get_messages()

	#if idle != 0 and check >= idle:
		#print "Worker is shutting down"
		#ec2.terminate_instances(instance_ids=[inst_id])

	if len(rs) > 0:
	  	check = 0
	 	m = rs[0]	
	  	job = m.get_body()
示例#41
0
                # NOTE(review): truncated fragment of a credential-file
                # parser -- the function header and the first branch
                # ("KEY=VALUE" format) start above this view.
                parts = hdr.split("=")
                out[parts[0]] = parts[1].strip()
                hdr = inf.readline()
            return {"aws_access_key_id": out["AWSAccessKeyId"], "aws_secret_access_key": out["AWSSecretKey"]}
        # Colon format: "#"-prefixed comment header, then "key: value" lines.
        elif hdr[0] == "#":
            while hdr[0] == "#":
                hdr = inf.readline()
            out = dict()
            while hdr:
                parts = hdr.split(":")
                out[parts[0]] = parts[1].strip()
                hdr = inf.readline()
            return {"aws_access_key_id": out["accessKeyId"], "aws_secret_access_key": out["secretKey"]}

        # IAM CSV format: second line is "user,access_key,secret_key".
        else:
            keys = inf.readline().split(",")
            return {"aws_access_key_id": keys[1].strip(), "aws_secret_access_key": keys[2].strip()}


# Load AWS credentials from the downloaded root-key file and make sure the
# working queue exists.
keys = getKeys("rootkey.csv")
region = "us-west-2"
queue_name = "test"

if not queue_name:
    raise Exception("You must set a queue name.")

sqs = boto.sqs.connect_to_region(region, **keys)
# get_queue returns None when the queue is missing; fall back to creating it.
queue = sqs.get_queue(queue_name) or sqs.create_queue(queue_name)
示例#42
0
        # NOTE(review): truncated fragment -- the enclosing function and the
        # config-loading try block start above this view.
        print('Cannot find or parse config file: ' + str(emsg))
        sys.exit(2)

    #logging for debug really you can set to logging.INFO to chill it out
    logging.basicConfig(filename=config['logfile'],level=logging.INFO)

    #try:
    #    tag = argv[1]
    #    ip = argv[2]
    #except BaseException, emsg:
    #    logging.warning(timestamp + ': Missing Arguments: ' + str(emsg) + ' : '+str(argv))
    #    sys.exit(2)

    # Poll the SQS queue for autoscaling notifications (SNS envelopes: the
    # outer body's "Message" field is itself JSON) and dispatch on Event.
    try:
        sqs = boto.sqs.connect_to_region("us-west-2",aws_access_key_id=config['s3']['aws_access_key'], aws_secret_access_key=config['s3']['aws_secret_key'])
        queue = sqs.get_queue(config['sqs']['name'])
        queue.set_message_class(RawMessage)
        print queue.count()
        for msg in queue.get_messages(1,visibility_timeout=10):
            single_message = json.loads(msg.get_body())
            message =  json.loads(single_message['Message'])
            #clean up messages on setup
            if message['Event'] == "autoscaling:TEST_NOTIFICATION":
                queue.delete_message(msg)
            elif message['Event']  == "autoscaling:EC2_INSTANCE_TERMINATE":
                terminationAction(message,config)
            elif message['Event']  == "autoscaling:EC2_INSTANCE_LAUNCH":
                launchAction(message,config)

    # NOTE(review): BaseException also swallows KeyboardInterrupt/SystemExit;
    # consider narrowing to Exception.
    except BaseException, emsg:
         logging.warning(timestamp + ': cannot get messages: ' + str(emsg))
示例#43
0
from bottle import route, run, static_file, response, template
import bottle
import boto
import boto.sqs
import json
import string
from boto.sqs.message import RawMessage
import signal
import os, subprocess

# Connect to SQS and get the message queue
conn = boto.connect_s3()
sqs = boto.connect_sqs()
queue = sqs.get_queue('lyc-job-requests')

# Poll the message queue in a loop
# NOTE(review): `global msg` at module level is a no-op, and queue.read(20)
# returns None when nothing arrives, in which case msg.get_body() would
# raise AttributeError -- presumably the (truncated) except below handles
# that; the try block has no visible except in this fragment.
while queue:
    global msg
    # Set wait_time_seconds = 20 for your read; this matches the queue limit
    wait_time_seconds = 20
    # Attempt to read a message from the queue
    try:
        msg = queue.read(20)

        msg_body = json.loads(msg.get_body())

        # If a message was read, extract job parameters from the message body
        bucket_name = msg_body['s3_inputs_bucket']
        job_id = msg_body['job_id']
        key = msg_body['s3_key_input_file']
        filename = key.split('/')[1]
示例#44
0
def wait_tasks():
    """Block until all queued slurm tasks have completed.

    Polls the response queue for task results, logging each result body to a
    per-task JSON file, and stops once no responses arrive and the request
    queue has been empty for MAX_TIMES_EMPTY consecutive polls.  Both queues
    are deleted on completion.

    Returns 0 on success, 1 if SLURM_EC2_QUEUE_ID is not set.
    """
    queue_id = environ.get("SLURM_EC2_QUEUE_ID")
    if queue_id is None:
        print("SLURM_EC2_QUEUE_ID environment variable not set", file=stderr)
        return 1

    request_queue_name = "slurm-%s-request" % queue_id
    response_queue_name = "slurm-%s-response" % queue_id
    sqs = get_sqs()
    request_queue = sqs.get_queue(request_queue_name)
    response_queue = sqs.get_queue(response_queue_name)

    times_empty = 0

    while True:
        msg = response_queue.read()
        if msg is None:
            # Are there pending requests?
            attrs = request_queue.get_attributes()
            in_flight = (int(attrs['ApproximateNumberOfMessages']) +
                         int(attrs['ApproximateNumberOfMessagesNotVisible']))

            if in_flight == 0:
                times_empty += 1
            else:
                times_empty = 0

            # If we've not seen any responses and haven't found any unserved
            # requests for MAX_TIMES_EMPTY polls, stop.
            if times_empty >= MAX_TIMES_EMPTY:
                break

            if in_flight == 0:
                attrs = response_queue.get_attributes()
                long_poll_time = int(
                    attrs.get('ReceiveMessageWaitTimeSeconds', 0))

                print("No tasks in flight... will wait %d more second(s)" %
                      ((MAX_TIMES_EMPTY - times_empty) *
                       (SLEEP_TIME + long_poll_time)))
            else:
                print("%s task(s) in flight, but none are ready" %
                      (in_flight, ))

            sleep(SLEEP_TIME)
            continue

        times_empty = 0

        try:
            # Decode the message as JSON
            response = json_loads(msg.get_body())
            id = response.get("id")
            exit_code = response.get("exit_code")

            # Pick the first writable location for the per-task log file.
            log_dirs = [".", environ["HOME"], "/tmp", "/var/tmp"]

            for log_dir in log_dirs:
                filename = "%s/%s.json" % (log_dir, id)
                try:
                    fd = open(filename, "w")
                    break
                except IOError:
                    pass
            else:
                # Couldn't open a log file anywhere; discard the log output.
                filename = "/dev/null"
                fd = open(filename, "w")

            fd.write(msg.get_body())
            fd.close()

            print("Task %s finished with exit code %s; logged to %s" %
                  (id, exit_code, filename))
        except Exception as e:
            # Best-effort logging: report the failure but keep draining the
            # queue.  (Bug fix: was a bare "except: pass", which also
            # swallowed KeyboardInterrupt/SystemExit.)
            print("Failed to record task result: %s" % (e, ), file=stderr)

        # Delete the message whether or not logging succeeded, as before.
        msg.delete()

    sqs.delete_queue(request_queue)
    sqs.delete_queue(response_queue)
    return 0
示例#45
0
#!/usr/bin/python

# Ler mensagens na fila do SQS

import boto.sqs

print "Inicio leitura de mensagem SQS"

sqs = boto.connect_sqs()
sqs = boto.sqs.connect_to_region('sa-east-1')

q = sqs.get_queue('DevOps')

#Quantidade de mensagens na fila
print q.count()

qtMensagensSolicitadas = 2
rs = q.get_messages(qtMensagensSolicitadas)

#Quantidade de mensagens que foram realmente lidas
print len(rs)

qtMensagens = 0
while qtMensagens < len(rs):
    m = rs[qtMensagens]
    print m.get_body()
    q.delete_message(m)
    qtMensagens = qtMensagens + 1

print "Fim da leitura de mensagem SQS"
示例#46
0
# - subscribe the SQS to the SNS topic
# - edit the template file "asg-name.cfg" and add additional services you need

import boto.sqs
import boto.ec2
import json
import fileinput
import os

# Drain the autoscaling-notification queue and decode each SNS envelope
# (the outer body's 'Message' field is itself JSON).
# NOTE(review): this snippet is cut off mid-loop; the actions taken on js2
# (and the use of fpath / needs_reaload) are not visible here.
qname = "asg-name"
fpath = "/root/asg-nagiosxi/"
needs_reaload = 0

ec2 = boto.ec2.connect_to_region("us-east-1")
sqs = boto.sqs.connect_to_region("us-east-1")
q = sqs.get_queue(qname)

# get all messages from queue
all_messages = []
rs = q.get_messages(num_messages=10, wait_time_seconds=1)

# Keep fetching in batches of 10 until the queue reports empty.
while len(rs) > 0:
    all_messages.extend(rs)
    rs = q.get_messages(num_messages=10, wait_time_seconds=1)
#

for i in range(len(all_messages)):
    # Any message at all means the nagios config needs regenerating.
    needs_reaload = 1
    message = all_messages[i]
    js = json.loads(message.get_body())
    js2 = json.loads(js['Message'])
def writeMessage(sqs, queueName, jsonDump):
    myQueue = sqs.get_queue(queueName)
    sqs.send_message(myQueue, jsonDump)
    print 'Write To Queue Complete' + '\n'
示例#48
0
import cred_conf
import boto.sqs, boto.sns
from boto.sqs.message import Message
import json
import time
import logging

# Module-level wiring: logging, SQS queue, SNS topic, and the Flask app.
# NOTE(review): the visible import is `cred_conf`, yet credentials are read
# from `cred_aws` (and DB config from `cred_db`) -- those imports must exist
# above/below this fragment or this raises NameError; verify.
FORMAT = r'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(stream=sys.stderr, level=logging.INFO, format=FORMAT)


sqs = boto.sqs.connect_to_region(
        "us-east-1",
        aws_access_key_id=cred_aws.aws_access_key_id,
        aws_secret_access_key=cred_aws.aws_secret_access_key)
# get_queue returns None if missing, so fall back to creating the queue.
my_queue = sqs.get_queue('myqueue') or sqs.create_queue('myqueue')

sns = boto.sns.connect_to_region(
        "us-east-1",
        aws_access_key_id=cred_aws.aws_access_key_id,
        aws_secret_access_key=cred_aws.aws_secret_access_key)
topicarn = cred_aws.aws_sns_topicarn

# Flask app object
application = app = Flask(__name__)
app.config.from_object(cred_conf)
app.config['SQLALCHEMY_DATABASE_URI'] = cred_db.SQLALCHEMY_DATABASE_URI
daemon = None

# Dabatase connection
db = SQLAlchemy(app)
示例#49
0
#
import boto.sqs
import os, sys
import json
import uuid

class UrlPackage(object):
    """Wraps a URL together with a freshly generated UUID for queueing."""

    def __init__(self, url):
        # Tag every URL with a random v4 UUID so downstream consumers can
        # correlate their results with this request.
        self.url = url
        self.uuid = uuid.uuid4()

    def to_json(self):
        """Return the package as a JSON string with 'url' and 'uuid' keys."""
        return json.dumps({"url": self.url, "uuid": str(self.uuid)})

if __name__ == '__main__':
	# Package the URL given on the command line and enqueue it on the
	# 'thrumlinkparser' SQS queue in eu-west-1.
	url = UrlPackage(sys.argv[1])
	sqs = boto.sqs.connect_to_region("eu-west-1")
	q = sqs.get_queue("thrumlinkparser")
	message = q.new_message(body=url.to_json())
	q.write(message)


示例#50
0
        # NOTE(review): truncated fragment -- the enclosing def/try header is
        # not visible here.  Parse command-line flags; gflags raises
        # FlagsError on bad input.
        argv = FLAGS(argv)[1:]
    except gflags.FlagsError, e:
        stderr.write("%s\\nUsage: %s update_id_addresses\\n%s\n" %
                     (e, sys.argv[0], FLAGS))
        return 1

    # Connect to SQS; ensure each target (fan-out) queue exists.
    sqs = boto.sqs.connect_to_region(FLAGS.region)
    targets = map(sqs.create_queue, FLAGS.targets)
    if FLAGS.domain is not None:
        simpledb = boto.sdb.connect_to_region(FLAGS.region)
        domain = simpledb.create_domain(FLAGS.domain)
    else:
        domain = None
    bucket = boto.s3.connect_to_region(FLAGS.region).get_bucket(FLAGS.bucket)

    q = sqs.get_queue(FLAGS.source)
    messages = q.get_messages()
    # NOTE(review): `messages` is never re-fetched inside the loop, so as
    # visible here the same batch is processed repeatedly; the rest of the
    # loop body is cut off below.
    while messages:
        for incoming in messages:
            fn = None
            try:
                message = json.loads(incoming.get_body())
                # Download the S3 object named by the message id to a temp
                # file, decode it with djpeg, and shrink it to a thumbnail.
                fn = mktemp()
                try:
                    bucket.get_key(message['id']).get_contents_to_filename(fn)
                    data = commands.getoutput('djpeg  ' + fn )
                    image = Image.open(StringIO(data))
                    image = image.resize((64,64))
                except IOError:
                    # Undecodable/missing image: skip this message.
                    continue 
                x, y = image.size
示例#51
0
File: pull.py  Project: rockpuppeh/dmsof
import numpy
from connect_info import *

import boto
import boto.s3
import boto.sqs
from boto.sqs.message import Message
from boto.s3.connection import S3Connection
from boto.s3.key import Key

# Pull worker: three queues coordinate the pipeline (input/start/output)
# alongside the S3 bucket.  NOTE(review): this snippet is cut off at the
# `else:` branch -- the actual processing is not visible here.
sqs = boto.sqs.connect_to_region("us-west-2",
                                 aws_access_key_id=aws_id,
                                 aws_secret_access_key=aws_key)
s3 = S3Connection(aws_access_key_id=aws_id, aws_secret_access_key=aws_key)

qi = sqs.get_queue('%s_input' % bucket)
qs = sqs.get_queue('%s_start' % bucket)
qo = sqs.get_queue('%s_output' % bucket)
b = s3.get_bucket(bucket)

while True:
    sys.stdout.write(
        'Checking queue for new input files (visibility timeout 5 minutes)\n')
    sys.stdout.flush()
    # read(300) hides the message from other workers for 5 minutes.
    m = qi.read(300)
    # NOTE(review): `m is None` would be the idiomatic comparison.
    if m == None:
        sys.stdout.write(
            '...no new input files available on queue, breaking loop\n')
        sys.stdout.flush()
        exit(1)
    else:
#!/usr/bin/python

# Ler mensagens na fila do SQS

import boto.sqs

print "Inicio leitura de mensagem SQS"

sqs = boto.connect_sqs()
sqs = boto.sqs.connect_to_region('sa-east-1')

q = sqs.get_queue('DevOps')

#Quantidade de mensagens na fila
print q.count()

qtMensagensSolicitadas = 2
rs = q.get_messages(qtMensagensSolicitadas)

#Quantidade de mensagens que foram realmente lidas
print len(rs)

qtMensagens = 0
while qtMensagens < len(rs):
	m = rs[qtMensagens]
	print m.get_body()
	q.delete_message(m)
	qtMensagens = qtMensagens + 1

print "Fim da leitura de mensagem SQS"
示例#53
0
        # NOTE(review): truncated fragment of a credential-file parser --
        # the function header and the first ("KEY=VALUE") branch start above
        # this view.
        # Colon format: '#'-prefixed comment header, then 'key: value' lines.
        elif hdr[0] == '#':
            while hdr[0] == '#':
                hdr = inf.readline()
            out = dict()
            while hdr:
                parts = hdr.split(':')
                out[parts[0]] = parts[1].strip()
                hdr = inf.readline()
            return {
                'aws_access_key_id': out['accessKeyId'],
                'aws_secret_access_key': out['secretKey']
            }

        # IAM CSV format: second line is "user,access_key,secret_key".
        else:
            keys = inf.readline().split(',')
            return {
                'aws_access_key_id': keys[1].strip(),
                'aws_secret_access_key': keys[2].strip()
            }


# Load AWS credentials from the downloaded root-key file and make sure the
# working queue exists.
keys = getKeys('rootkey.csv')
region = 'us-west-2'
queue_name = "test"

if not queue_name: raise Exception('You must set a queue name.')

sqs = boto.sqs.connect_to_region(region, **keys)
# get_queue returns None when the queue is missing; fall back to creating it.
queue = sqs.get_queue(queue_name) or sqs.create_queue(queue_name)
示例#54
0
def run_tasks():
    """Worker loop: read task requests from the slurm SQS request queue,
    execute them, and post results to the response queue.

    Each request is a JSON object {"id": ..., "cmd": [argv, ...], "env":
    {...} or null}; each response is JSON {"id", "exit_code", "stdout",
    "stderr"}.  A USR1 signal sets exit_requested so the loop exits once
    the queue drains.  Returns 0 on normal shutdown, 1 if
    SLURM_EC2_QUEUE_ID is not set.
    """
    global exit_requested
    queue_id = environ.get("SLURM_EC2_QUEUE_ID")
    if queue_id is None:
        print("SLURM_EC2_QUEUE_ID environment variable not set", file=stderr)
        return 1

    request_queue_name = "slurm-%s-request" % queue_id
    response_queue_name = "slurm-%s-response" % queue_id

    sqs = get_sqs()
    request_queue = sqs.get_queue(request_queue_name)
    response_queue = sqs.get_queue(response_queue_name)

    # Handle a USR1 signal by setting the exit_requested flag -- we'll exit
    # when we find no more tasks in the queue.
    signal(SIGUSR1, request_exit)

    # Keep reading tasks from the request queue.
    while True:
        msg = request_queue.read()
        if msg is None:
            if exit_requested:
                break

            # Don't exit just yet...
            sleep(SLEEP_TIME)
            continue

        print("Message received: %r" % msg.get_body())

        try:
            # Decode the message as JSON
            request = json_loads(msg.get_body())

            print("Message decoded: %r" % (request, ))

            id = request.get("id")
            if id is None:
                raise ValueError("Missing id in message")

            cmd = request.get("cmd")
            env = request.get("env")

            if cmd is None:
                # No command to execute.
                exit_code = 127
                err = "No command to execute"
                print(err)
                out = ""
            elif not isinstance(cmd, (list, tuple)):
                # Invalid command line
                exit_code = 127
                err = ("Invalid command -- expected list instead of %s" %
                       (type(cmd).__name__))
                print(err)
                out = ""
            elif not isinstance(env, (dict, NoneType)):
                # Invalid environment.  Bug fix: report the type of env
                # here -- the original copy-pasted type(cmd).__name__.
                exit_code = 127
                err = ("Invalid environment -- expected dict instead of %s" %
                       (type(env).__name__))
                print(err)
                out = ""
            else:
                print("Invoking: %r" % (cmd, ))
                print("Environment: %r" % (env, ))

                # shell=False with an argv list: no shell injection risk.
                proc = Popen(cmd,
                             bufsize=BUFSIZE,
                             stdin=PIPE,
                             stdout=PIPE,
                             stderr=PIPE,
                             close_fds=True,
                             shell=False,
                             env=env)

                out, err = proc.communicate()
                exit_code = proc.returncode

                print("Process exited with exit_code %d" % exit_code)
                print("stdout:-----")
                print(out)
                print("stderr:-----")
                print(err)

                del proc

            # Report the result, then acknowledge the request.
            response = response_queue.new_message(
                json_dumps({
                    'id': id,
                    'exit_code': exit_code,
                    'stdout': out,
                    'stderr': err
                }))
            response_queue.write(response)
            msg.delete()
        except ValueError as e:
            # Malformed JSON or missing id: log the raw body and drop the
            # message so it is not redelivered forever.
            print("Unable to decode message: %r" % (msg.get_body(), ))
            msg.delete()

    return 0