Example #1
0
    def call_run(self):
        """Start the local store, ask the remote hub to begin operating,
        then service hub commands from SQS until interrupted.

        Only the PLAY_SOUND command is handled in the message loop; every
        received message is deleted from the queue before processing.
        """
        # Dispatch on the configured store class.
        if self.store_type == store_kinesis.StoreKinesis:
            self.run_kinesis()
        elif self.store_type == store_firehose.StoreFirehose:
            self.run_firehose()
        else:
            print('Unsupported store type: ' + str(self.store_type))
        self.store.start_storing()
        self.store.start()

        # Tell the remote hub to start operating on our stream.
        # TODO: run boto commands to spawn an EC2 instance that
        # runs the remote hub
        start_msg = MHMessage(self.hub_queue_up)
        start_msg[constants.ATTR_COMMAND] = constants.START_OPERATING
        start_msg[constants.ATTR_STREAM_NAME] = self.stream_name
        self.hub_queue_up.write(start_msg)

        # Poll the downstream queue forever, one command at a time.
        while True:
            incoming = self.hub_queue_down.read(visibility_timeout=None,
                                                wait_time_seconds=1)
            if incoming is None:
                continue
            self.hub_queue_down.delete_message(incoming)
            print('hub_remote: received SQS message: ' + str(incoming.get_body()))
            command = incoming[constants.ATTR_COMMAND]
            if command == constants.PLAY_SOUND:
                audio_out = AudioOut(incoming[constants.ATTR_SOUND_FILE],
                                     incoming[constants.ATTR_VOLUME_LEVEL])
                audio_out.play()
Example #2
0
def uploadSuccess():
    """Handle the S3 upload-success callback.

    Reads the asset id, bucket and key from the request query string,
    builds the public S3 URL and, when SQS is enabled, queues a READY
    message for downstream processing.

    Returns a plain-text confirmation containing the asset id.
    """
    # Renamed from `hash`: don't shadow the builtin.
    asset_id = request.args.get("id", "")
    bucket = request.args.get("bucket", "")
    key = request.args.get("key", "")
    asset_url = "http://%s.s3.amazonaws.com/%s" % (bucket, key)

    # Lazy %-args instead of eager string formatting; fixed grammar typo
    # ("Successful uploaded").
    logging.info("Successfully uploaded asset URL: %s", asset_url)

    # If SQS enabled, create job in uploading state
    if sqs_enabled:
        message = MHMessage()
        message["ID"] = asset_id
        message["STATUS"] = "READY"
        message["ASSET_URL"] = asset_url
        # queue.write returns the written message on success; the old code
        # captured it and then logged success unconditionally.
        status = queue.write(message)
        if status:
            logging.info("Successfully queued asset URL: %s", asset_url)
        else:
            logging.error("Failed to queue asset URL: %s", asset_url)

    return "Upload Successful! hash=%s" % asset_id
Example #3
0
    for f in files:
        basefile = os.path.basename(f)
        mic = basefile.split("_")[0]
        bucket_name = BUCKET_PREFIX+mic.lower()
        try:
            print "Checking bucket: ", bucket_name
            bucket = check_s3_bucket_exists(s3cxn, bucket_name)    
        except Exception:
            if options.create_buckets == True:
                print "Creating bucket: ", bucket_name
                s3cxn.create_bucket(bucket_name)
            else:
                sys.exit(errno.ENFILE)
    
        bucket = s3cxn.get_bucket(bucket)
        key = bucket.get_key(basefile)
        exists = (key is not None)
        if exists == True:
            print "Key exists - skipping upload"
        else:
            print "Uploading: ", f
            s3_multipart_upload.main(f, bucket_name)         
            if options.donotqueue is False:
                m = MHMessage()
                m['input_file'] = os.path.basename(f)
                m['bucket'] = bucket_name
                print "Queueing message" , m.get_body(), " ==> ", options.queue
                q.write(m)
            else : 
                print "Skipping message queueing"
Example #4
0
 def test_contains(self):
     """Keys merged in via update() are visible to the ``in`` operator."""
     msg = MHMessage()
     msg.update({'hello': 'world'})
     found = 'hello' in msg
     self.assertTrue(found)
Example #5
0
 def test_contains(self):
     """A key added through update() satisfies a membership test."""
     message = MHMessage()
     message.update({'hello': 'world'})
     self.assertTrue('hello' in message)
Example #6
0
 def test_contains(self):
     """update() entries should answer ``in`` checks on the message."""
     msg = MHMessage()
     payload = {"hello": "world"}
     msg.update(payload)
     self.assertTrue("hello" in msg)
Example #7
0
                    # NOTE(review): "retured" is a typo in the log string
                    # (presumably "returned") -- left untouched, it is
                    # runtime output.
                    print "Processing retured: ", code, string

            # Upload the processed HDF file to S3.  ("file file" is a typo
            # in the log string; left untouched.)
            print "Moving processed file file to bucket"
            # KTK - TODO - should wrap in try except block - to catch failed upload
            s3_multipart_upload.main(full_hdf_path, bucket)

            # Success path: reset the retry counter, stamp the message with
            # this EC2 instance's identity and completion time, report it on
            # the output queue, ack (delete) it on the input queue, and
            # remove the local working files.
            retries = 0
            md = boto.utils.get_instance_metadata()
            m['instance-id'] = md['instance-id']
            m['public-hostname'] = md['public-hostname']
            m['completion-time'] = time.asctime(time.gmtime())
            qout.write(m)
            qin.delete_message(m)
            os.remove(full_hdf_path)
            os.remove(full_input_path)

        else:
            # No work available (the matching `if` begins above this chunk):
            # back off and count a retry.
            time.sleep(options.retry_wait)
            retries += 1

        if retries == options.max_retries:
            # Idle limit reached: publish shutdown metadata on the output
            # queue and, when configured, terminate this EC2 instance.
            md = boto.utils.get_instance_metadata()
            ec2cxn = boto.connect_ec2()
            m = MHMessage()
            m['shutdown-time'] = time.asctime(time.gmtime())
            m['instance-id'] = md['instance-id']
            m['public-hostname'] = md['public-hostname']
            qout.write(m)
            if options.terminate:
                ec2cxn.terminate_instances([md['instance-id']])