Ejemplo n.º 1
0
 def test_generate_multipart_md5(self):
     """Check the multipart ETag computed for a 10000-byte zero-filled file."""
     expected = '"728d2dbdd842b6a145cc3f3284d66861-4"'
     with TmpFile() as path:
         # Grow the file to 10000 bytes of zeros without writing data.
         with open(path, 'w') as handle:
             handle.truncate(10000)
         actual = calculate_multipart_etag(path, 3333)
         self.assertEqual(actual, expected, 'Wrong calculated md5 for multipart upload')
Ejemplo n.º 2
0
 def test_generate_multipart_md5(self):
     """The multipart ETag of a sparse 10000-byte file split at 3333 bytes is known."""
     with TmpFile() as filename:
         with open(filename, 'w') as fileobj:
             # truncate() extends the empty file to 10000 zero bytes.
             fileobj.truncate(10000)
         computed = calculate_multipart_etag(filename, 3333)
         self.assertEqual(
             computed,
             '"728d2dbdd842b6a145cc3f3284d66861-4"',
             'Wrong calculated md5 for multipart upload')
Ejemplo n.º 3
0
 def _check_multipart_copy_integrity(self, key, dest_bucket, dest_key_name, path):
     """Compare the local multipart ETag against the copied key's, warning on mismatch.

     Best-effort: if S3 denies access (missing ACP permissions) the check is
     skipped with a warning rather than raised.
     """
     from boto.exception import S3ResponseError
     try:
         copied_key = dest_bucket.get_key(dest_key_name)
         local_etag = calculate_multipart_etag(path, CHUNK_SIZE)
         self._warn_if_etags_differ(key, copied_key, source_md5=local_etag)
     except S3ResponseError:
         self.logger.warning(
             'Skipping copy integrity. We have no READ_ACP/WRITE_ACP permissions')
Ejemplo n.º 4
0
 def _check_multipart_copy_integrity(self, key, dest_bucket, dest_key_name, path):
     """Warn if the copied key's ETag differs from the locally computed one.

     Silently skips (with a warning) when S3 returns an error, since the
     account may lack READ_ACP/WRITE_ACP permissions.
     """
     from boto.exception import S3ResponseError
     try:
         target = dest_bucket.get_key(dest_key_name)
         checksum = calculate_multipart_etag(path, CHUNK_SIZE)
         self._warn_if_etags_differ(key, target, source_md5=checksum)
     except S3ResponseError:
         self.logger.warning(
             'Skipping copy integrity. We have no READ_ACP/WRITE_ACP permissions')
Ejemplo n.º 5
0
 def _upload_large_file(self, dump_path, key_name):
     """Upload dump_path to S3 under key_name using multipart upload.

     When save_metadata is enabled, the multipart ETag is precomputed and
     attached to the key's metadata before the upload starts.
     """
     self.logger.debug('Using multipart S3 uploader')
     # Only pay for the ETag computation if metadata will be stored.
     md5 = calculate_multipart_etag(dump_path, CHUNK_SIZE) if self.save_metadata else None
     metadata = self._create_key_metadata(dump_path, md5=md5)
     with multipart_upload(self.bucket, key_name, metadata=metadata) as upload:
         for part in split_file(dump_path):
             self._upload_chunk(upload, part)
             self.logger.debug('Uploaded chunk number {}'.format(part.number))
Ejemplo n.º 6
0
 def _upload_large_file(self, dump_path, key_name):
     """Multipart-upload dump_path to key_name, optionally recording its ETag.

     The ETag (md5) is computed up front only when save_metadata is set, and
     is passed into the key metadata created before the upload begins.
     """
     self.logger.debug('Using multipart S3 uploader')
     if self.save_metadata:
         etag = calculate_multipart_etag(dump_path, CHUNK_SIZE)
     else:
         etag = None
     key_metadata = self._create_key_metadata(dump_path, md5=etag)
     with multipart_upload(self.bucket, key_name, metadata=key_metadata) as handle:
         for piece in split_file(dump_path):
             self._upload_chunk(handle, piece)
             self.logger.debug('Uploaded chunk number {}'.format(piece.number))
Ejemplo n.º 7
0
 def _upload_large_file(self, dump_path, key_name):
     """Multipart-upload dump_path, then fix permissions and save metadata.

     After the upload completes, the resulting key is fetched to adjust its
     permissions and, when save_metadata is set, to store the multipart ETag.
     """
     self.logger.debug('Using multipart S3 uploader')
     with multipart_upload(self.bucket, key_name) as upload:
         for part in split_file(dump_path):
             self._upload_chunk(upload, part)
             self.logger.debug('Uploaded chunk number {}'.format(part.number))
     # Post-upload: fetch the finished key to set ACLs and attach metadata.
     with closing(self.bucket.get_key(key_name)) as uploaded_key:
         self._ensure_proper_key_permissions(uploaded_key)
         if self.save_metadata:
             etag = calculate_multipart_etag(dump_path, CHUNK_SIZE)
             self._save_metadata_for_key(uploaded_key, dump_path, md5=etag)