def __init__(self, case):
     super(ObjectListPageTest, self).__init__(case)
     self.s3 = get_env_s3_client()
     self.file_num = 10
     self.max_page_size = 33
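     # Test objects to list: each entry pairs a key prefix with a base object name.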
     self.test_files = [
         {"prefix": "", "name": "abcdef0"},
         {"prefix": "", "name": "abcdef1"},
         {"prefix": "", "name": "abcdef2"},
         {"prefix": "a/", "name": "bcdef0"},
         {"prefix": "a/", "name": "bcdef1"},
         {"prefix": "ab/", "name": "cdef0"},
         {"prefix": "abc/", "name": "def1"},
         {"prefix": "abc/", "name": "def2"},
         {"prefix": "ab/c", "name": "def02"},
         {"prefix": "ab/cd", "name": "ef02"},
         {"prefix": "ab/c/d", "name": "ef01"},
         {"prefix": "ab/c/d", "name": "ef02"},
     ]
Example #2
 def __init__(self, case):
     super(TransferTest, self).__init__(case)
     self.s3 = get_env_s3_client()
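     # boto3/s3transfer TransferConfig: switch to multipart at 5 MB, transfer 5 MB parts
     # with up to 10 concurrent threads, and retry downloads up to 5 times.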
     tc = TransferConfig(multipart_threshold=5 * 1024 * 1024,
                         max_concurrency=10,
                         multipart_chunksize=5 * 1024 * 1024,
                         num_download_attempts=5,
                         max_io_queue=100,
                         io_chunksize=262144,
                         use_threads=True)
     self.tm = TransferManager(self.s3, tc)
 def __init__(self, case):
     super(ObjectListPageTest, self).__init__(case)
     self.s3 = get_env_s3_client()
     self.file_total = 0
     self.max_page_size = 17
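     # Test objects to create: each entry pairs a key prefix with a base object name;
     # "file_num" (where present) is a per-entry object count.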
     self.test_files = [
         {"prefix": "", "name": "abcdef0", "file_num": 3},
         {"prefix": "", "name": "abcdef1", "file_num": 4},
         {"prefix": "", "name": "abcde/f2", "file_num": 5},
         {"prefix": "a/", "name": "bcdef0", "file_num": 7},
         {"prefix": "a/", "name": "bcdef1", "file_num": 3},
         {"prefix": "ab/", "name": "cdef0", "file_num": 7},
         {"prefix": "abc/", "name": "def1", "file_num": 5},
         {"prefix": "abc/", "name": "def2", "file_num": 4},
         {"prefix": "ab/c", "name": "def02", "file_num": 5},
         {"prefix": "ab/cd", "name": "ef02", "file_num": 5},
         {"prefix": "ab/c/d", "name": "ef01", "file_num": 3},
         {"prefix": "ab/c/d", "name": "ef02", "file_num": 7},
         {"prefix": "dir1/", "name": "f1"},
         {"prefix": "dir1/", "name": "f2"},
         {"prefix": "dir1/", "name": "f3"},
         {"prefix": "dir1/", "name": "f4"},
         {"prefix": "dir1/", "name": "f5"},
         {"prefix": "dir1/", "name": "f6"},
         {"prefix": "dir1/", "name": "f7"},
         {"prefix": "dir1-2/", "name": "f1"},
         {"prefix": "dir1-2/", "name": "f2"},
         {"prefix": "dir1-2/", "name": "f3"},
         {"prefix": "dir1-2/", "name": "f4"},
         {"prefix": "dir1-2/", "name": "f5"},
         {"prefix": "dir1-2/", "name": "f6"},
         {"prefix": "dir1-2/", "name": "f7"},
         {"prefix": "dir2/", "name": "f1"},
         {"prefix": "dir2/", "name": "f2"},
         {"prefix": "dir2/", "name": "f3"},
         {"prefix": "dir2/", "name": "f4"},
         {"prefix": "dir2/", "name": "f5"},
         {"prefix": "dir2/", "name": "f6"},
         {"prefix": "dir2/", "name": "f7", "file_num": 10},
     ]
    def __init__(self, case):
        super(ObjectGetRangeTest, self).__init__(case)
        self.s3 = get_env_s3_client()
        self.file_size = 10000
        self.file_key = KEY_PREFIX + random_string(16)
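        # Each case: the Range header to send, the expected status code, and
        # (for 206 responses) the expected Content-Range and Content-Length.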
        self.test_cases = [
            { "range":"bytes=0-499", "status_code": 206, "content-range":"bytes 0-499/10000", "content-length": 500 },
            { "range":"bytes=500-999", "status_code": 206, "content-range":"bytes 500-999/10000",  "content-length": 500 },
            { "range":"bytes=9500-", "status_code": 206, "content-range":"bytes 9500-9999/10000",  "content-length": 500 },
            { "range":"bytes=0-", "status_code": 206, "content-range":"bytes 0-9999/10000",  "content-length": 10000 },
            { "range":"bytes=0-0", "status_code": 206, "content-range":"bytes 0-0/10000",  "content-length": 1 },
            { "range":"bytes=-500", "status_code": 206, "content-range":"bytes 9500-9999/10000", "content-length": 500 },
            { "range":"bytes=-1", "status_code": 206, "content-range":"bytes 9999-9999/10000",  "content-length": 1 },
            { "range":"bytes=-0", "status_code": 206, "content-range":"bytes 0-9999/10000", "content-length": 10000 },
            { "range":"bytes=1-0", "status_code": 416 },
            { "range":"bytes=10", "status_code": 416 },
            { "range":"bytes=", "status_code": 416 },
            { "range":"bytes=abc", "status_code": 416 },
            { "range":"bytes=abc-123", "status_code": 416 },
            { "range":"1-0", "status_code": 416 },
        ]

        self._init_object()
Example #5
 def __init__(self, case):
     super(ObjectListTest, self).__init__(case)
     self.s3 = get_env_s3_client()
Example #6
class ListMultipartTest(S3TestCase):
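    # uploadInfos: every multipart upload created in setUpClass
    # commonUploadInfos: uploads with top-level keys (no prefix)
    # prefixUploadInfos: uploads created under KEY_PREFIX_ONE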
    uploadInfos = []
    commonUploadInfos = []
    prefixUploadInfos = []
    s3 = get_env_s3_client()

    def __init__(self, case):
        super(ListMultipartTest, self).__init__(case)

    @classmethod
    def setUpClass(cls):
        """
        Initialize test data before the test cases run: create the multipart upload sessions.
        :return:
        """
        # abort all previous multipart uploads to remove the impact of historical data
        cls.abort_previous_upload()
        # create new test multipart uploads
        for i in range(INIT_NUMBERS):
            key = str(i) + ".txt"
            res = cls.s3.create_multipart_upload(Bucket=BUCKET, Key=key)
            upload = UploadInfo(upload_id=res["UploadId"], key=key)
            ListMultipartTest.uploadInfos.append(upload)
            ListMultipartTest.commonUploadInfos.append(upload)
        for i in range(INIT_NUMBERS):
            key = KEY_PREFIX_ONE + str(i) + ".txt"
            res = cls.s3.create_multipart_upload(Bucket=BUCKET, Key=key)
            upload = UploadInfo(upload_id=res["UploadId"], key=key)
            ListMultipartTest.uploadInfos.append(upload)
            ListMultipartTest.prefixUploadInfos.append(upload)
        for i in range(INIT_NUMBERS):
            key = KEY_PREFIX_TWO + str(i) + ".txt"
            res = cls.s3.create_multipart_upload(Bucket=BUCKET, Key=key)
            upload = UploadInfo(upload_id=res["UploadId"], key=key)
            ListMultipartTest.uploadInfos.append(upload)
        ListMultipartTest.uploadInfos = sorted(ListMultipartTest.uploadInfos,
                                               key=lambda upload_info: upload_info.key, )
        ListMultipartTest.prefixUploadInfos = sorted(ListMultipartTest.prefixUploadInfos,
                                                     key=lambda upload_info: upload_info.key, )
        ListMultipartTest.commonUploadInfos = sorted(ListMultipartTest.commonUploadInfos,
                                                     key=lambda upload_info: upload_info.key, )

    @classmethod
    def tearDownClass(cls):
        """
        Abort all test multipart uploads after the test cases finish.
        :return:
        """
        for upload_info in cls.uploadInfos:
            cls.s3.abort_multipart_upload(Bucket=BUCKET,
                                          Key=upload_info.key,
                                          UploadId=upload_info.upload_id)

    @classmethod
    def abort_previous_upload(cls):
        is_truncated = True
        key_marker = ""
        upload_id_marker = ""
        while is_truncated:
            if key_marker == "" and upload_id_marker == "":
                response = cls.s3.list_multipart_uploads(Bucket=BUCKET,
                                                         MaxUploads=MAX_UPLOADS)
            else:
                response = cls.s3.list_multipart_uploads(Bucket=BUCKET,
                                                         MaxUploads=MAX_UPLOADS,
                                                         KeyMarker=key_marker,
                                                         UploadIdMarker=upload_id_marker)
            if "Uploads" not in response or len(response["Uploads"]) <= 0:
                break
            for upload_info in response["Uploads"]:
                print("abort multipart upload,  upload id : {}, key : {}"
                      .format(upload_info["UploadId"], upload_info["Key"]))
                cls.s3.abort_multipart_upload(Bucket=BUCKET,
                                              Key=upload_info["Key"],
                                              UploadId=upload_info["UploadId"])
            is_truncated = response["IsTruncated"]
            key_marker = response["NextKeyMarker"]
            upload_id_marker = response["NextUploadIdMarker"]
            if not is_truncated:
                break

    def test_list_size_and_order(self):
        """
        Check the result size and the order of every key:
        1. the number of listed uploads must equal the number of created uploads
        2. the keys must be returned in ascending order
        :return:
        """
        is_truncated = True
        key_marker = ""
        upload_id_marker = ""
        response_list_size = 0
        loop_count = 0
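        # page through the uploads and compare each page against the expected slice
        # of the sorted uploadInfos list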
        while is_truncated:
            if key_marker == "" and upload_id_marker == "":
                response = self.s3.list_multipart_uploads(Bucket=BUCKET,
                                                          MaxUploads=MAX_UPLOADS)
            else:
                response = self.s3.list_multipart_uploads(Bucket=BUCKET,
                                                          MaxUploads=MAX_UPLOADS,
                                                          KeyMarker=key_marker,
                                                          UploadIdMarker=upload_id_marker)
            # guard before counting: "Uploads" may be absent from an empty page
            if "Uploads" not in response or len(response["Uploads"]) <= 0:
                break
            response_list_size += len(response["Uploads"])
            for index in range(len(response["Uploads"])):
                upload_info = self.uploadInfos[(loop_count * MAX_UPLOADS) + index]
                self.assertEqual(response["Uploads"][index]["Key"], upload_info.key)
                self.assertEqual(response["Uploads"][index]["UploadId"], upload_info.upload_id)
            is_truncated = response["IsTruncated"]
            key_marker = response["NextKeyMarker"]
            upload_id_marker = response["NextUploadIdMarker"]
            loop_count = loop_count + 1
            if not is_truncated:
                break
        # check the key list size
        self.assertEqual(INIT_NUMBERS * 3, response_list_size)

    def test_prefix(self):
        """
        1. the prefix listing size must equal the number of uploads created under the prefix
        2. every listed key must start with the prefix "list/test/one/"
        3. the listed keys must be in ascending order
        :return:
        """
        response = self.s3.list_multipart_uploads(Bucket=BUCKET,
                                                  Prefix=KEY_PREFIX_ONE)
        # prefix list size must equal upload key list size with special prefix
        self.assertEqual(len(response["Uploads"]), len(self.prefixUploadInfos))

        # every key must start with the prefix and match the expected ascending order
        for index in range(len(response["Uploads"])):
            self.assertTrue(response["Uploads"][index]["Key"].startswith(KEY_PREFIX_ONE))
            self.assertEqual(response["Uploads"][index]["Key"], self.prefixUploadInfos[index].key)
            self.assertEqual(response["Uploads"][index]["UploadId"], self.prefixUploadInfos[index].upload_id)

    def test_delimiter(self):
        """
        Test the delimiter parameter without a prefix.
        :return:
        """
        delimiter = "/"
        response = self.s3.list_multipart_uploads(Bucket=BUCKET,
                                                  Delimiter=delimiter)
        # keys
        self.assertEqual(len(response["Uploads"]), len(self.commonUploadInfos))

        # common uploads
        for index in range(len(response["Uploads"])):
            self.assertEqual(response["Uploads"][index]["Key"], self.commonUploadInfos[index].key)
            self.assertEqual(response["Uploads"][index]["UploadId"], self.commonUploadInfos[index].upload_id)

        # prefix
        self.assertEqual(response["CommonPrefixes"][0]["Prefix"], "list/")

    def test_prefix_delimiter(self):
        """
        Test delimiter with prefix.
        :return:
        """
        delimiter = "/"
        response = self.s3.list_multipart_uploads(Bucket=BUCKET,
                                                  Prefix=KEY_PREFIX,
                                                  Delimiter=delimiter)
        # prefix
        self.assertEqual(len(response["CommonPrefixes"]), 2)
        self.assertEqual(response["CommonPrefixes"][0]["Prefix"], "list/test/one/")
        self.assertEqual(response["CommonPrefixes"][1]["Prefix"], "list/test/two/")
Example #7
 def __init__(self, case):
     super(BucketTest, self).__init__(case)
     self.s3 = get_env_s3_client()
Example #8
class CopyObjectTest(S3TestCase):
    s3 = get_env_s3_client()

    def __init__(self, case):
        super(CopyObjectTest, self).__init__(case)

    @classmethod
    def setUpClass(cls):
        """
        Create test data by putting objects for the source keys.
        :return:
        """
        cls.clear_data()
        # create source object info
        cls.create_key(key=SOURCE_KEY, content=b'copyTest source key content')
        cls.create_key(key=SOURCE_KEY_DIR, content='')
        cls.create_key(key=SOURCE_KEY_WITH_META,
                       content=b'copyTest source key with meta data',
                       meta_data=True)
        cls.create_key(key=SOURCE_KEY_RESET_META,
                       content=b'copyTest source key for used reset meta data')

    @classmethod
    def tearDownClass(cls):
        """
        Clean up temporary data, including the initialized test data, intermediate data, and result data.
        :return:
        """
        cls.clear_data()

    @classmethod
    def create_key(cls, key, content, meta_data=False):
        """
        Put an object with the given key and content, optionally attaching self-defined metadata.
        :return:
        """
        if meta_data:
            metadata = {
                META_DATE_KEY_1: META_DATE_VALUE_1,
                META_DATE_KEY_2: META_DATE_VALUE_2
            }
            cls.s3.put_object(Bucket=BUCKET,
                              Key=key,
                              Body=content,
                              Metadata=metadata)
        else:
            cls.s3.put_object(Bucket=BUCKET, Key=key, Body=content)

    @classmethod
    def delete_key(cls, key):
        """
        :return:
        """
        cls.s3.delete_object(Bucket=BUCKET, Key=key)

    def __copy_object(self,
                      s_bucket,
                      s_key,
                      t_bucket,
                      t_key,
                      is_dir=False,
                      contain_meta_data=False):
        # sleep one second, otherwise the target key's LastModified would equal the source's
        time.sleep(1)
        copy_source = {'Bucket': s_bucket, 'Key': s_key}
        self.s3.copy_object(CopySource=copy_source, Bucket=t_bucket, Key=t_key)
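        # verify the copy by comparing the source and target object heads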
        source_response = self.s3.head_object(Bucket=s_bucket, Key=s_key)
        target_response = self.s3.head_object(Bucket=t_bucket, Key=t_key)
        self.assertNotEqual(target_response["ETag"], "")
        self.assertEqual(target_response["ETag"], source_response["ETag"])
        self.assertEqual(target_response["ContentLength"],
                         source_response["ContentLength"])
        self.assertGreater(target_response["LastModified"],
                           source_response["LastModified"])
        if is_dir:
            self.assertEqual(target_response["ContentLength"], 0)
        if contain_meta_data:
            target_meta_data = target_response["Metadata"]
            # the target object must carry the source's metadata;
            # the metadata keys in the response are returned in lower case,
            # so we look them up with lower-cased keys
            self.assertIsNotNone(target_meta_data)
            self.assertTrue(META_DATE_KEY_1.lower() in target_meta_data.keys())
            self.assertTrue(META_DATE_KEY_2.lower() in target_meta_data.keys())
            self.assertEqual(target_meta_data[META_DATE_KEY_1.lower()],
                             META_DATE_VALUE_1)
            self.assertEqual(target_meta_data[META_DATE_KEY_2.lower()],
                             META_DATE_VALUE_2)

    @classmethod
    def clear_data(cls):
        cls.delete_key(key=SOURCE_KEY)
        cls.delete_key(key=TARGET_KEY)
        cls.delete_key(key=SOURCE_KEY_DIR)
        cls.delete_key(key=TARGET_KEY_DIR)
        cls.delete_key(key=SOURCE_KEY_WITH_META)
        cls.delete_key(key=TARGET_KEY_WITH_META)
        cls.delete_key(key=SOURCE_KEY_RESET_META)
        cls.delete_key(key=TARGET_KEY_RESET_META)
        cls.delete_key(key=SOURCE_KEY_MODIFY_META)

    def test_copy_common_key(self):
        """
        Copy a common file using default values.
        :return:
        """
        self.__copy_object(s_bucket=BUCKET,
                           s_key=SOURCE_KEY,
                           t_bucket=BUCKET,
                           t_key=TARGET_KEY)

    def test_copy_dir(self):
        """
        Copy a directory: the source key is a directory (its content is empty and its key path ends with '/'),
        and the target key is a directory too.
        :return:
        """
        self.__copy_object(s_bucket=BUCKET,
                           s_key=SOURCE_KEY_DIR,
                           t_bucket=BUCKET,
                           t_key=TARGET_KEY_DIR,
                           is_dir=True)

    def test_copy_metadata(self):
        """
        Copy the source object's metadata.
        If the source object has self-defined metadata, the target object carries the same metadata by default.
        :return:
        """
        self.__copy_object(s_bucket=BUCKET,
                           s_key=SOURCE_KEY_WITH_META,
                           t_bucket=BUCKET,
                           t_key=TARGET_KEY_WITH_META,
                           contain_meta_data=True)

    def test_copy_reset_metadata(self):
        """
        Reset the target object's metadata, regardless of whether the source object has self-defined metadata.
        :return:
        """
        source_bucket = BUCKET
        target_bucket = BUCKET
        source_key = SOURCE_KEY_RESET_META
        target_key = TARGET_KEY_RESET_META
        # sleep one second, otherwise the target key's LastModified would equal the source's
        time.sleep(1)
        copy_source = {'Bucket': source_bucket, 'Key': source_key}
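        # metadata that should fully replace whatever the source object carries,
        # applied via MetadataDirective="REPLACE" below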
        metadata = {
            META_DATE_KEY_1: META_DATE_VALUE_1,
            META_DATE_KEY_2: META_DATE_VALUE_2
        }
        self.s3.copy_object(CopySource=copy_source,
                            Bucket=target_bucket,
                            Key=target_key,
                            MetadataDirective="REPLACE",
                            Metadata=metadata)

        source_response = self.s3.head_object(Bucket=source_bucket,
                                              Key=source_key)
        target_response = self.s3.head_object(Bucket=target_bucket,
                                              Key=target_key)
        # compare basic info
        self.assertNotEqual(target_response["ETag"], "")
        self.assertEqual(target_response["ETag"], source_response["ETag"])
        self.assertEqual(target_response["ContentLength"],
                         source_response["ContentLength"])
        self.assertGreater(target_response["LastModified"],
                           source_response["LastModified"])
        # compare metadata
        # the source key carries no metadata,
        # while the target key carries the replaced metadata
        source_metadata = source_response["Metadata"]
        target_metadata = target_response["Metadata"]
        self.assertEqual(len(source_metadata), 0)
        self.assertEqual(len(target_metadata), 2)
        self.assertTrue(META_DATE_KEY_1.lower() in target_metadata.keys())
        self.assertTrue(META_DATE_KEY_2.lower() in target_metadata.keys())
        self.assertEqual(target_metadata[META_DATE_KEY_1.lower()],
                         META_DATE_VALUE_1)
        self.assertEqual(target_metadata[META_DATE_KEY_2.lower()],
                         META_DATE_VALUE_2)

    def test_copy_modify_metadata(self):
        """
        Modify an object's metadata by copying it onto itself (the target key has the same path as the
        source object) while specifying new metadata values.
        :return:
        """
        metadata = {META_DATE_KEY_1: META_DATE_VALUE_1}
        content = "b'copyTest source key for used modify meta data'"
        self.s3.put_object(Bucket=BUCKET,
                           Key=SOURCE_KEY_MODIFY_META,
                           Body=content,
                           Metadata=metadata)
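        # copy the object onto itself with MetadataDirective="REPLACE" to rewrite
        # its metadata in place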
        copy_source = {'Bucket': BUCKET, 'Key': SOURCE_KEY_MODIFY_META}
        metadata = {META_DATE_KEY_1: META_DATE_VALUE_1_MODIFIED}
        self.s3.copy_object(CopySource=copy_source,
                            Bucket=BUCKET,
                            Key=SOURCE_KEY_MODIFY_META,
                            MetadataDirective="REPLACE",
                            Metadata=metadata)
        response = self.s3.head_object(Bucket=BUCKET,
                                       Key=SOURCE_KEY_MODIFY_META)
        # the target key now contains only the modified metadata
        metadata = response["Metadata"]
        self.assertEqual(len(metadata), 1)
        self.assertTrue(META_DATE_KEY_1.lower() in metadata.keys())
        self.assertEqual(metadata[META_DATE_KEY_1.lower()],
                         META_DATE_VALUE_1_MODIFIED)
Example #9
 def __init__(self, case):
     super(PolicyTest, self).__init__(case)
     self.s3 = get_env_s3_client()
Example #10
 def __init__(self, case):
     super(TaggingTest, self).__init__(case)
     self.s3 = get_env_s3_client()