def uploadSuccess():
    """Handle the S3 post-upload success callback.

    Reads the asset hash, bucket, and key from the request query string,
    reconstructs the public S3 URL, and (when SQS is enabled) enqueues a
    READY message so downstream workers pick the asset up.

    Returns:
        str: a plain-text confirmation containing the asset hash.
    """
    # Renamed from `hash` — the original shadowed the `hash` builtin.
    asset_hash = request.args.get("id", "")
    bucket = request.args.get("bucket", "")
    key = request.args.get("key", "")
    asset_url = "http://%s.s3.amazonaws.com/%s" % (bucket, key)
    # Lazy %-args: the message is only formatted if this level is emitted.
    logging.info("Successful uploaded asset URL: %s", asset_url)

    # If SQS enabled, create job in uploading state
    if sqs_enabled:
        message = MHMessage()
        message["ID"] = asset_hash
        message["STATUS"] = "READY"
        message["ASSET_URL"] = asset_url
        # queue.write() returns the queued message; the original bound it
        # to an unused `status` local, dropped here.
        queue.write(message)
        logging.info("Successfully queued asset URL: %s", asset_url)

    return "Upload Successful! hash=%s" % asset_hash
for f in files: basefile = os.path.basename(f) mic = basefile.split("_")[0] bucket_name = BUCKET_PREFIX+mic.lower() try: print "Checking bucket: ", bucket_name bucket = check_s3_bucket_exists(s3cxn, bucket_name) except Exception: if options.create_buckets == True: print "Creating bucket: ", bucket_name s3cxn.create_bucket(bucket_name) else: sys.exit(errno.ENFILE) bucket = s3cxn.get_bucket(bucket) key = bucket.get_key(basefile) exists = (key is not None) if exists == True: print "Key exists - skipping upload" else: print "Uploading: ", f s3_multipart_upload.main(f, bucket_name) if options.donotqueue is False: m = MHMessage() m['input_file'] = os.path.basename(f) m['bucket'] = bucket_name print "Queueing message" , m.get_body(), " ==> ", options.queue q.write(m) else : print "Skipping message queueing"
def test_contains(self):
    """Keys added via update() must be visible to `in` membership tests."""
    msg = MHMessage()
    msg.update({'hello': 'world'})
    # assertIn reports the container contents on failure, unlike
    # assertTrue('hello' in msg) which only prints "False is not true".
    self.assertIn('hello', msg)