def clear_app():
    s3_creds = utils.s3_creds()
    hashtag = redis_queue.get("hashtag")
    logger.info("Got request to reset. Will clear the db and bucket")
    logger.debug("flushing redis image db")
    redis_images.flushdb()
    logger.debug("flushing redis queue db")
    redis_queue.flushdb()
    logger.debug("repopulating the hashtag")
    redis_queue.set("hashtag", hashtag)
    logger.debug("opening s3 connection")
    s3conn = boto.connect_s3(s3_creds['access_key'],
                             s3_creds['secret_key'],
                             host=s3_creds['url'])  # set up an S3-style connection
    logger.debug("Getting bucket")
    bucket = s3conn.get_bucket(s3_creds['bucket_name'])  # reference to the S3 bucket
    logger.debug("deleting bucket contents in batches of 100")
    all_keys = [x.key for x in bucket.list()]
    for keys in batch_gen(all_keys, 100):
        logger.info("Enqueueing deletion of {} keys (first: {}) from object store".format(len(keys), keys[0]))
        q.enqueue(bucket.delete_keys, keys, ttl=60, result_ttl=60, timeout=60)
    return len(all_keys)
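
# batch_gen() is called above but not defined in this section. A minimal sketch
# of what such a batching helper might look like, assuming the call
# batch_gen(all_keys, 100) above means "yield the list in chunks of 100"
# (the name and signature come from that call; the body is an assumption):
def batch_gen(items, batch_size):
    """Yield successive slices of `items`, each at most `batch_size` long."""
    for i in range(0, len(items), batch_size):
        yield items[i:i + batch_size]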
def store_to_vipr(image_data):
    s3_creds = utils.s3_creds()
    worker_logger.debug("Storing to ViPR")
    worker_logger.debug("Connecting to ViPR")
    s3conn = boto.connect_s3(s3_creds['access_key'],
                             s3_creds['secret_key'],
                             host=s3_creds['url'])  # set up an S3-style connection
    worker_logger.debug("Getting bucket")
    bucket = s3conn.get_bucket(s3_creds['bucket_name'])  # reference to the S3 bucket
    # lifecycle = Lifecycle()  # new lifecycle manager
    # worker_logger.debug("Setting bucket rules on ViPR")
    # lifecycle.add_rule('Expire 1 day', status='Enabled', expiration=Expiration(days=1))
    # Make sure the bucket is set to only keep one-day-old images.
    # Probably don't need to do this on every upload. TODO!
    image_guid = str(uuid.uuid4())  # pick a random UUID
    k = Key(bucket)  # and get a new key to refer to the file object
    k.key = "{}.jpg".format(image_guid)  # give it a name based on the UUID
    worker_logger.debug("Uploading to ViPR")
    k.set_contents_from_string(image_data.getvalue())  # upload it
    worker_logger.info("Stored image {} to object store".format(k.key))
    return k  # and return that key info
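
# The commented-out Lifecycle code above suggests a one-time bucket setup that
# should not run on every upload. A hedged sketch of how that rule could be
# applied once (for example at application start). The function name
# configure_bucket_expiry is an assumption; Lifecycle, Expiration and
# Bucket.configure_lifecycle are the boto S3 calls referenced in that comment.
from boto.s3.lifecycle import Lifecycle, Expiration  # likely already imported at the top of the file

def configure_bucket_expiry(bucket):
    """Apply a one-day expiration rule to the bucket once, instead of per upload."""
    lifecycle = Lifecycle()
    # expire objects one day after upload, matching the commented-out rule above
    lifecycle.add_rule('Expire 1 day', status='Enabled',
                       expiration=Expiration(days=1))
    bucket.configure_lifecycle(lifecycle)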