def test_delete_directory(request):
    """Integration test: ``delete_directory`` removes only keys under a prefix.

    Uploads three files under a unique per-run bucket root, deletes the
    ``a/b/`` subtree, then verifies that keys outside ``a/b/`` survive while
    keys inside it are gone.

    Requires the ``LTD_KEEPER_TEST_AWS_ID``, ``LTD_KEEPER_TEST_AWS_SECRET``
    and ``LTD_KEEPER_TEST_BUCKET`` environment variables, and network access
    to the test S3 bucket.

    Parameters
    ----------
    request : _pytest.fixtures.FixtureRequest
        Built-in pytest fixture; used to register the bucket cleanup
        finalizer so test objects are removed even if assertions fail.
    """
    # Read the credentials/bucket name once so the setup, the deletion under
    # test, and the cleanup finalizer are guaranteed to use the same values.
    aws_id = os.getenv('LTD_KEEPER_TEST_AWS_ID')
    aws_secret = os.getenv('LTD_KEEPER_TEST_AWS_SECRET')
    bucket_name = os.getenv('LTD_KEEPER_TEST_BUCKET')

    session = boto3.session.Session(
        aws_access_key_id=aws_id,
        aws_secret_access_key=aws_secret)
    s3 = session.resource('s3')
    bucket = s3.Bucket(bucket_name)

    # Unique per-run prefix so concurrent test runs cannot collide.
    bucket_root = str(uuid.uuid4()) + '/'

    def cleanup():
        # Remove everything uploaded under this run's prefix.
        print("Cleaning up the bucket")
        delete_directory(bucket_name, bucket_root, aws_id, aws_secret)
    request.addfinalizer(cleanup)

    file_paths = ['a/test1.txt', 'a/b/test2.txt', 'a/b/c/test3.txt']
    _upload_files(file_paths, bucket, bucket_root,
                  'sample-key', 'max-age=3600', 'text/plain')

    # Delete b/*
    delete_directory(bucket_name, bucket_root + 'a/b/', aws_id, aws_secret)

    # Ensure paths outside of that are still available, but paths in b/ are
    # deleted. Directory-placeholder keys (ending in '/') are skipped.
    bucket_paths = []
    for obj in bucket.objects.filter(Prefix=bucket_root):
        if obj.key.endswith('/'):
            continue
        bucket_paths.append(obj.key)

    for p in file_paths:
        bucket_path = bucket_root + p
        if p.startswith('a/b'):
            assert bucket_path not in bucket_paths
        else:
            assert bucket_path in bucket_paths