def do_auth_command(self, bot, connection, event, command, parameters, reply_target, auth_level):
    """Handle an authenticated bot command.

    Returns True when the command was handled here, False so another
    handler can claim it.
    """
    if command not in self.auth_commands:
        # Not one of ours.
        return False
    if command == 'backup':
        # Stored format: <access key>|<secret key>|<bucket name>|<uploaded file name>
        access_key, secret_key, bucket, key_name = bot.db.get('s3_credentials').split('|')
        session = boto3.session.Session(
            aws_access_key_id = access_key,
            aws_secret_access_key = secret_key,
        )
        transfer = boto3.s3.transfer.S3Transfer(session.client('s3'))
        # Infrequent-access storage class keeps backup costs down.
        transfer.upload_file(
            bot.module_parameters['database:name'],
            bucket,
            key_name,
            extra_args = {'StorageClass': 'STANDARD_IA'},
        )
        # Acknowledge with a random affirmative phrase.
        bot.send(connection, reply_target, bot.db.get_random('yes'), event)
        return True
    return False
def test_upload_above_threshold(self):
    """A file larger than the multipart threshold uploads successfully."""
    two_mb = 2 * 1024 * 1024
    transfer = self.create_s3_transfer(
        boto3.s3.transfer.TransferConfig(multipart_threshold=two_mb))
    path = self.files.create_file_with_size(
        '20mb.txt', filesize=20 * 1024 * 1024)
    transfer.upload_file(path, self.bucket_name, '20mb.txt')
    self.addCleanup(self.delete_object, '20mb.txt')
    self.assertTrue(self.object_exists('20mb.txt'))
def test_can_send_extra_params_on_upload(self):
    """extra_args passed to upload_file are applied to the stored object."""
    transfer = self.create_s3_transfer()
    path = self.files.create_file_with_size('foo.txt', filesize=1024)
    transfer.upload_file(
        path, self.bucket_name, 'foo.txt',
        extra_args={'ACL': 'public-read'})
    self.addCleanup(self.delete_object, 'foo.txt')
    acl_response = self.client.get_object_acl(
        Bucket=self.bucket_name, Key='foo.txt')
    self.assert_has_public_read_acl(acl_response)
def test_upload_above_threshold(self):
    """Uploading a 20 MB file with a 2 MB multipart threshold succeeds."""
    threshold = 2 * 1024 * 1024
    cfg = boto3.s3.transfer.TransferConfig(multipart_threshold=threshold)
    transfer = self.create_s3_transfer(cfg)
    local_file = self.files.create_file_with_size(
        '20mb.txt', filesize=20 * 1024 * 1024)
    transfer.upload_file(local_file, self.bucket_name, '20mb.txt')
    self.addCleanup(self.delete_object, '20mb.txt')
    self.assertTrue(self.object_exists('20mb.txt'))
def test_can_send_extra_params_on_upload(self):
    """An ACL supplied via extra_args ends up on the uploaded object."""
    transfer = self.create_s3_transfer()
    local_file = self.files.create_file_with_size('foo.txt', filesize=1024)
    acl_args = {'ACL': 'public-read'}
    transfer.upload_file(local_file, self.bucket_name, 'foo.txt',
                         extra_args=acl_args)
    self.addCleanup(self.delete_object, 'foo.txt')
    response = self.client.get_object_acl(
        Bucket=self.bucket_name, Key='foo.txt')
    self.assert_has_public_read_acl(response)
def transferTest(self, filesetPrefix):
    """Upload every file in the configured directory for *filesetPrefix*
    and report elapsed time via self.duration().
    """
    started = time.time()
    transfer = boto3.s3.transfer.S3Transfer(
        client=self.client,
        config=boto3.s3.transfer.TransferConfig(),
        osutil=None,
        manager=None,
    )
    # NOTE(review): paths and keys are built by plain string concatenation,
    # so directory_upload presumably ends with a path separator — confirm.
    base_dir = self.config.directory_upload + filesetPrefix
    for entry in os.listdir(base_dir):
        local_path = base_dir + entry
        s3_key = filesetPrefix + entry
        print("upload", s3_key, local_path)
        transfer.upload_file(local_path, self.bucket, s3_key,
                             callback=None, extra_args=None)
    self.duration(started, "boto3 s3.transfer:")
def test_upload_file_above_threshold_with_acl(self):
    """A multipart upload carries the ACL through to the final object."""
    cfg = boto3.s3.transfer.TransferConfig(
        multipart_threshold=5 * 1024 * 1024)
    transfer = self.create_s3_transfer(cfg)
    path = self.files.create_file_with_size(
        '6mb.txt', filesize=6 * 1024 * 1024)
    transfer.upload_file(path, self.bucket_name, '6mb.txt',
                         extra_args={'ACL': 'public-read'})
    self.addCleanup(self.delete_object, '6mb.txt')
    self.assertTrue(self.object_exists('6mb.txt'))
    acl = self.client.get_object_acl(
        Bucket=self.bucket_name, Key='6mb.txt')
    self.assert_has_public_read_acl(acl)
def test_upload_file_above_threshold_with_acl(self):
    """6 MB upload over a 5 MB threshold keeps its public-read ACL."""
    five_mb = 5 * 1024 * 1024
    transfer = self.create_s3_transfer(
        boto3.s3.transfer.TransferConfig(multipart_threshold=five_mb))
    local_file = self.files.create_file_with_size(
        '6mb.txt', filesize=6 * 1024 * 1024)
    transfer.upload_file(
        local_file, self.bucket_name, '6mb.txt',
        extra_args={'ACL': 'public-read'})
    self.addCleanup(self.delete_object, '6mb.txt')
    self.assertTrue(self.object_exists('6mb.txt'))
    response = self.client.get_object_acl(
        Bucket=self.bucket_name, Key='6mb.txt')
    self.assert_has_public_read_acl(response)
def awsUpload(bucket, local_path, s3_path):
    """Upload *local_path* to s3://<bucket>/<s3_path> with progress reporting.

    Credentials and region are read from uploaderConfig.json in the
    working directory (keys: access_key, aws_secret_key, region).

    Args:
        bucket: destination S3 bucket name.
        local_path: path of the local file to upload.
        s3_path: destination object key within the bucket.
    """
    with open('uploaderConfig.json') as f:
        configVals = json.load(f)
    # Fix: these values were previously extracted into locals and then
    # ignored while the client call re-read the raw dict. Use them.
    aws_key = configVals['access_key']
    aws_secret_key = configVals['aws_secret_key']
    region = configVals['region']
    client = boto3.client(
        's3',
        region,
        aws_access_key_id=aws_key,
        aws_secret_access_key=aws_secret_key,
    )
    transfer = S3Transfer(client)
    # ProgressPercentage prints cumulative progress as bytes are sent.
    transfer.upload_file(local_path, bucket, s3_path,
                         callback=ProgressPercentage(local_path))
def test_progress_callback_on_upload(self):
    """The callback's cumulative byte count must equal the file size."""
    self.amount_seen = 0
    lock = threading.Lock()

    def on_progress(nbytes):
        # Callbacks may arrive from multiple transfer threads.
        with lock:
            self.amount_seen += nbytes

    transfer = self.create_s3_transfer()
    size = 20 * 1024 * 1024
    path = self.files.create_file_with_size('20mb.txt', filesize=size)
    transfer.upload_file(path, self.bucket_name, '20mb.txt',
                         callback=on_progress)
    self.addCleanup(self.delete_object, '20mb.txt')
    # Summing every reported amount should reproduce the uploaded size.
    self.assertEqual(self.amount_seen, size)
def test_progress_callback_on_upload(self):
    """Progress callbacks collectively report every uploaded byte."""
    self.amount_seen = 0
    seen_lock = threading.Lock()

    def progress_callback(amount):
        with seen_lock:
            self.amount_seen += amount

    transfer = self.create_s3_transfer()
    filename = self.files.create_file_with_size(
        '20mb.txt', filesize=20 * 1024 * 1024)
    transfer.upload_file(filename, self.bucket_name, '20mb.txt',
                         callback=progress_callback)
    self.addCleanup(self.delete_object, '20mb.txt')
    # The total of all callback amounts must match the file size exactly.
    self.assertEqual(self.amount_seen, 20 * 1024 * 1024)
def test_callback_called_once_with_sigv4(self):
    """Regression for #98: sigv4 must not double-invoke the callback.

    If the callback fired twice per chunk, the total would exceed the
    file size and the final equality would fail.
    """
    self.amount_seen = 0
    lock = threading.Lock()

    def on_progress(nbytes):
        with lock:
            self.amount_seen += nbytes

    sigv4_client = self.session.client(
        's3', self.region, config=Config(signature_version='s3v4'))
    transfer = boto3.s3.transfer.S3Transfer(sigv4_client)
    size = 10 * 1024 * 1024
    path = self.files.create_file_with_size('10mb.txt', filesize=size)
    transfer.upload_file(path, self.bucket_name, '10mb.txt',
                         callback=on_progress)
    self.addCleanup(self.delete_object, '10mb.txt')
    self.assertEqual(self.amount_seen, size)
def test_callback_called_once_with_sigv4(self):
    """Verify #98: the progress callback fires once per chunk under sigv4."""
    self.amount_seen = 0
    progress_lock = threading.Lock()

    def progress_callback(amount):
        with progress_lock:
            self.amount_seen += amount

    client = self.session.client(
        's3', self.region,
        config=Config(signature_version='s3v4'))
    transfer = boto3.s3.transfer.S3Transfer(client)
    filename = self.files.create_file_with_size(
        '10mb.txt', filesize=10 * 1024 * 1024)
    transfer.upload_file(filename, self.bucket_name, '10mb.txt',
                         callback=progress_callback)
    self.addCleanup(self.delete_object, '10mb.txt')
    # Double invocation would make the observed total exceed the size.
    self.assertEqual(self.amount_seen, 10 * 1024 * 1024)
def test_upload_file_above_threshold_with_ssec(self):
    """SSE-C customer-key args survive a multipart upload."""
    sse_args = {
        'SSECustomerKey': os.urandom(32),
        'SSECustomerAlgorithm': 'AES256',
    }
    transfer = self.create_s3_transfer(
        boto3.s3.transfer.TransferConfig(
            multipart_threshold=5 * 1024 * 1024))
    path = self.files.create_file_with_size(
        '6mb.txt', filesize=6 * 1024 * 1024)
    transfer.upload_file(path, self.bucket_name, '6mb.txt',
                         extra_args=sse_args)
    self.addCleanup(self.delete_object, '6mb.txt')
    # HeadObject fails unless the same customer key is supplied, so a
    # successful head proves the key was applied to the object.
    head = self.client.head_object(
        Bucket=self.bucket_name, Key='6mb.txt', **sse_args)
    self.assertEqual(head['SSECustomerAlgorithm'], 'AES256')
def awsUpload(bucket, local_path, s3_path):
    """Upload a file to S3, then verify it by comparing MD5 hashes.

    Uploads *local_path* to s3://<bucket>/<s3_path>, computes the local
    and remote MD5 digests via MD5parse, logs the comparison through
    writeMD5, and returns True when the hashes match.

    NOTE(review): `transfer` is not defined in this function — it is
    presumably a module-level S3Transfer instance; confirm before reuse.
    MD5parse, writeMD5 and ProgressPercentage are also external helpers
    not visible here.
    """
    transfer.upload_file(local_path, bucket, s3_path, callback=ProgressPercentage(local_path))
    # Compute both digests for the object just uploaded.
    md5_obj = MD5parse(bucket, s3_path, local_path)
    local_hash = md5_obj.localMD5()
    aws_md5 = md5_obj.awsMD5()
    # NOTE(review): a plain ETag comparison breaks for multipart uploads
    # (ETag is not the object's MD5 then) — confirm MD5parse handles this.
    result = aws_md5 == str(local_hash)
    # Tab-separated audit line: object name, both hashes, and the verdict.
    output = "Object Name: " + s3_path.split( '/' )[-1] + "\t" + "AWS MD5 Hash: " + aws_md5 + "\t" + "Local MD5 Hash: " + str( local_hash) + "\t" + "File upload" + "\t" + "Match is " + str(result)
    writeMD5(output)
    print("MD5sum match for: " + s3_path.split('/')[-1])
    print(output)
    print("Result is " + str(result))
    return (result)
def test_upload_file_above_threshold_with_ssec(self):
    """A multipart upload honours SSE-C customer-provided encryption keys."""
    key_bytes = os.urandom(32)
    extra_args = {
        'SSECustomerKey': key_bytes,
        'SSECustomerAlgorithm': 'AES256',
    }
    five_mb = 5 * 1024 * 1024
    config = boto3.s3.transfer.TransferConfig(multipart_threshold=five_mb)
    transfer = self.create_s3_transfer(config)
    local_file = self.files.create_file_with_size(
        '6mb.txt', filesize=6 * 1024 * 1024)
    transfer.upload_file(local_file, self.bucket_name, '6mb.txt',
                         extra_args=extra_args)
    self.addCleanup(self.delete_object, '6mb.txt')
    # A HeadObject on an SSE-C object only succeeds when the same key is
    # provided, which is exactly the property we want to verify.
    response = self.client.head_object(
        Bucket=self.bucket_name, Key='6mb.txt', **extra_args)
    self.assertEqual(response['SSECustomerAlgorithm'], 'AES256')
def load_from_filename(self, file_path):
    """Upload the file at *file_path* to this store's bucket and key."""
    transfer = boto3.s3.transfer.S3Transfer(self._connect())
    transfer.upload_file(file_path, self._bucket, self._keyname)
def load_from_filename(self, file_path):
    """Store the given local file under this object's configured S3 key."""
    s3_client = self._connect()
    uploader = boto3.s3.transfer.S3Transfer(s3_client)
    uploader.upload_file(file_path, self._bucket, self._keyname)
def uploadFile(filepath):
    """Upload *filepath* to s3://BUCKET/PREFIX using the module-level client.

    Returns an empty tuple (kept for compatibility with existing callers).
    """
    # NOTE(review): no progress callback is attached, so the upload runs
    # silently despite what earlier comments suggested.
    S3Transfer(client).upload_file(filepath, BUCKET, PREFIX)
    return ()