def test_get_url_private_expired(self):
    """A private file whose signed URL has expired gets a fresh URL on access."""
    # create a bucket in the moto/mock s3 service
    conn = boto3.resource('s3')
    conn.create_bucket(Bucket='castletest')

    content = upload_file_to_castle(self)
    aws.move_file(content)
    content = api.content.get(path='/file-repository/foobar.bin')

    # back-date the stored generation timestamp so the URL looks expired
    ann = IAnnotations(content)
    storage_info = ann.get(aws.STORAGE_KEY, PersistentMapping())
    stale_timestamp = time() - aws.EXPIRES_IN - 1000
    storage_info.update({'generated_on': stale_timestamp})
    ann[aws.STORAGE_KEY] = storage_info

    # requesting the URL should regenerate it rather than serve the stale one
    url = aws.get_url(content)
    self.assertTrue(url.startswith(self.test_base_url))

    # re-fetch and confirm the annotations were refreshed
    content = api.content.get(path='/file-repository/foobar.bin')
    ann = IAnnotations(content)
    storage_info = ann.get(aws.STORAGE_KEY, PersistentMapping())
    self.assertNotEqual(storage_info["generated_on"], stale_timestamp)
    self.assertEqual(storage_info["expires_in"], aws.EXPIRES_IN)
def test_delete_file(self):
    """Deleting by uid removes the object's key from the s3 bucket."""
    # create a bucket in the moto/mock s3 service
    conn = boto3.resource('s3')
    conn.create_bucket(Bucket='castletest')

    content = upload_file_to_castle(self)
    s3, bucket = aws.get_bucket(s3_bucket='castletest')
    uid = IUUID(content)
    key = aws.KEY_PREFIX + uid
    aws.move_file(content)

    # before the delete operation, the file should exist
    # on s3, and this statement should not raise an exception
    s3.meta.client.head_object(Bucket=bucket.name, Key=key)

    aws.delete_file(uid)

    # after the delete operation, the file should not exist
    # on s3, but should still exist in plone (even if it
    # has no data...apparently...admittedly a place of possible
    # improvement?)
    self.assertRaises(
        botocore.exceptions.ClientError,
        lambda: s3.meta.client.head_object(Bucket=bucket.name, Key=key))
def _file_edited(obj):
    """Decide what to do with a content object's file after an edit.

    Videos get (re)processed first. Files that should never live on s3
    (non-file types, pdfs, small files, or when no aws key is configured)
    are cleaned out of aws; large File/Audio/Video blobs are moved to s3.
    """
    try:
        # no file field or empty file: nothing to host remotely
        if not obj.file:
            return _clean_aws(obj)
    except AttributeError:
        # object has no `file` attribute at all — not our concern
        return
    if obj.portal_type == 'Video':
        video.process(obj)
        # persist the conversion result before any further handling
        transaction.commit()
    if obj.portal_type not in ('Video', 'Audio', 'File'):
        return _clean_aws(obj)
    # contentType can be None on blobs with no detected MIME type;
    # guard so the membership test cannot raise TypeError.
    # we also aren't moving pdfs out of here
    if 'pdf' in (obj.file.contentType or ''):
        return _clean_aws(obj)
    registry = getUtility(IRegistry)
    if registry.get('castle.aws_s3_key', None) is None:
        # aws is not configured for this site
        return
    max_size_mb = registry.get('castle.max_file_size', 50)
    max_size = max_size_mb * 1024 * 1024
    if obj.file.getSize() > max_size:
        aws.move_file(obj)
    else:
        # small enough to serve from plone; make sure no stale s3 copy remains
        return _clean_aws(obj)
def test_uploaded(self):
    """aws.uploaded reflects whether the file has been moved to s3."""
    # create a bucket in the moto/mock s3 service
    conn = boto3.resource('s3')
    conn.create_bucket(Bucket='castletest')

    content = upload_file_to_castle(self)
    # freshly uploaded content has not been pushed to s3 yet
    self.assertFalse(aws.uploaded(content))

    aws.move_file(content)
    content = api.content.get(path='/file-repository/foobar.bin')
    # after the move, the s3 marker is present
    self.assertTrue(aws.uploaded(content))
def test_get_url_public_or_notexpired(self):
    """A published file served from s3 yields a URL under the expected base."""
    # create a bucket in the moto/mock s3 service
    conn = boto3.resource('s3')
    conn.create_bucket(Bucket='castletest')

    content = upload_file_to_castle(self)
    api.content.transition(content, 'publish')
    aws.move_file(content)

    content = api.content.get(path='/file-repository/foobar.bin')
    url = aws.get_url(content)
    self.assertTrue(url.startswith(self.test_base_url))
def test_move_file_public(self):
    """Moving a published file to s3 sets metadata, annotations and the s3 key."""
    # create a bucket in the moto/mock s3 service
    conn = boto3.resource('s3')
    conn.create_bucket(Bucket='castletest')

    content = upload_file_to_castle(self)
    api.content.transition(content, 'publish')
    s3, bucket = aws.get_bucket("castletest")
    uid = IUUID(content)
    key = aws.KEY_PREFIX + uid

    # before move: no saved "original_*" metadata on the file field
    for attr in ('original_filename', 'original_size', 'original_content_type'):
        self.assertFalse(hasattr(content.file, attr))
    # before move: no storage annotations on the object
    ann = IAnnotations(content)
    self.assertIsNone(ann.get(aws.STORAGE_KEY, None))
    # before move: the key must not exist in s3
    self.assertRaises(
        botocore.exceptions.ClientError,
        lambda: s3.meta.client.head_object(Bucket=bucket.name, Key=key))

    aws.move_file(content)
    content = api.content.get(path='/file-repository/foobar.bin')

    # after move: original metadata was recorded on the file field
    for attr in ('original_filename', 'original_size', 'original_content_type'):
        self.assertTrue(hasattr(content.file, attr))

    # after move: annotations carry the s3 url and expiry bookkeeping
    ann = IAnnotations(content)
    storage_info = ann[aws.STORAGE_KEY]
    url = storage_info.get('url', None)
    expires_in = storage_info.get('expires_in', None)
    generated_on = storage_info.get('generated_on', None)
    self.assertIsNotNone(url)
    self.assertIsNotNone(expires_in)
    self.assertIsNotNone(generated_on)
    # public content gets a non-expiring url
    self.assertEqual(expires_in, 0)

    # after move: the key exists in s3, so head_object must not raise
    s3.meta.client.head_object(Bucket=bucket.name, Key=key)