Example #1
def test_file_chunks_with_smaller_last_chunk(self):
    with TmpFile() as tmp_filename:
        with open(tmp_filename, 'w') as f:
            f.truncate(10000)
        chunks = list(split_file(tmp_filename, 3333))
        self.assertEqual(len(chunks), 4,
                         'Incorrect number of chunks from file')
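None of these examples show split_file itself. A minimal sketch of an implementation consistent with the test above, assuming chunks are simple records with a 1-based number (the Chunk type, its fields, and the default chunk size are guesses, not the project's actual code):

import os
from collections import namedtuple

# Hypothetical chunk record; the real chunk type is not shown in these
# examples, but it must expose at least a `number` attribute.
Chunk = namedtuple('Chunk', ['number', 'path', 'offset', 'size'])

def split_file(path, chunk_size=100 * 1024 ** 2):
    # The uploaders call split_file(dump_path) with no size, so the real
    # function presumably defaults to CHUNK_SIZE; 100 MiB is a placeholder.
    # Walk the file in fixed-size steps; the last chunk covers whatever
    # remains, so 10000 bytes at 3333 per chunk yields 3333 + 3333 +
    # 3333 + 1, i.e. the four chunks the test expects.
    total = os.path.getsize(path)
    offset, number = 0, 1
    while offset < total:
        size = min(chunk_size, total - offset)
        yield Chunk(number, path, offset, size)
        number += 1
        offset += size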
Example #2
def _upload_large_file(self, dump_path, key_name):
    self.logger.debug('Using multipart S3 uploader')
    md5 = None
    if self.save_metadata:
        md5 = calculate_multipart_etag(dump_path, CHUNK_SIZE)
    metadata = self._create_key_metadata(dump_path, md5=md5)
    with multipart_upload(self.bucket, key_name, metadata=metadata) as mp:
        for chunk in split_file(dump_path):
            self._upload_chunk(mp, chunk)
            self.logger.debug(
                'Uploaded chunk number {}'.format(chunk.number))
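The md5 stored in the metadata here cannot be a plain whole-file MD5: for multipart uploads S3 reports the ETag as the MD5 of the concatenated per-part MD5 digests followed by '-<part count>'. A minimal sketch of that computation (the function name matches the snippet; the exact return format, such as surrounding quotes, is an assumption):

import hashlib

def calculate_multipart_etag(path, chunk_size):
    # Hash each chunk_size slice separately, then hash the
    # concatenation of those binary digests.
    digests = []
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(chunk_size), b''):
            digests.append(hashlib.md5(block).digest())
    combined = hashlib.md5(b''.join(digests)).hexdigest()
    # S3 reports multipart ETags as '<md5 of digests>-<number of parts>'.
    return '{}-{}'.format(combined, len(digests))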
Example #3
def _upload_large_file(self, dump_path, key_name):
    self.logger.debug('Using multipart S3 uploader')
    md5 = None
    if self.save_metadata:
        md5 = calculate_multipart_etag(dump_path, CHUNK_SIZE)
    metadata = self._create_key_metadata(dump_path, md5=md5)
    with multipart_upload(self.bucket, key_name, metadata=metadata) as mp:
        for chunk in split_file(dump_path):
            self._upload_chunk(mp, chunk)
            self.logger.debug('Uploaded chunk number {}'.format(
                chunk.number))
Example #4
def _upload_large_file(self, dump_path, key_name):
    self.logger.debug('Using multipart S3 uploader')
    with multipart_upload(self.bucket, key_name) as mp:
        for chunk in split_file(dump_path):
            self._upload_chunk(mp, chunk)
            self.logger.debug(
                'Uploaded chunk number {}'.format(chunk.number))
    with closing(self.bucket.get_key(key_name)) as key:
        self._ensure_proper_key_permissions(key)
        if self.save_metadata:
            md5 = calculate_multipart_etag(dump_path, CHUNK_SIZE)
            self._save_metadata_for_key(key, dump_path, md5=md5)
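Every variant uses multipart_upload as a context manager, which implies it completes the upload on a clean exit and aborts it on error. A plausible sketch on top of boto's multipart API (the real helper may differ, for example in retry handling):

from contextlib import contextmanager

@contextmanager
def multipart_upload(bucket, key_name, metadata=None):
    # initiate_multipart_upload returns a boto MultiPartUpload;
    # parts are attached to it until it is completed or cancelled.
    mp = bucket.initiate_multipart_upload(key_name, metadata=metadata)
    try:
        yield mp
    except Exception:
        # Abort so S3 does not keep storing the orphaned parts.
        mp.cancel_upload()
        raise
    mp.complete_upload()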
Example #5
def _upload_large_file(self, bucket, dump_path, key_name):
    from boto.exception import S3ResponseError
    self.logger.info('Using multipart S3 uploader')
    with multipart_upload(bucket, key_name) as mp:
        for chunk in split_file(dump_path):
            self._upload_chunk(mp, chunk)
            self.logger.info('Uploaded chunk number {}'.format(
                chunk.number))
    try:
        with closing(bucket.get_key(key_name)) as key:
            self._ensure_proper_key_permissions(key)
    except S3ResponseError:
        self.logger.warning('We could not ensure proper permissions. '
                            'We have no READ_ACP/WRITE_ACP permissions')
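_upload_chunk itself never appears in these snippets. Assuming the hypothetical Chunk record from the split_file sketch above (path, offset, size are all assumed attributes), one way to feed a chunk to boto; upload_part_from_file is boto's actual MultiPartUpload API:

def upload_chunk(mp, chunk):
    # S3 part numbers are 1-based, which lines up with chunk.number.
    with open(chunk.path, 'rb') as f:
        f.seek(chunk.offset)
        mp.upload_part_from_file(f, part_num=chunk.number,
                                 size=chunk.size)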