Example No. 1
 def test3(self):
     """ test wether will fallback only down 2 primary node """
     self.skipTest("disable acorrding to current behavior")
     return
     proxy = BeansDBProxy([self.proxy_addr])
     self.backend1.stop()
     #        self.backend2.stop()
     key3 = 'key3'
     i = 0
     store4 = MCStore(self.backend4_addr)
     ts_start = time.time()
     fallbacked = False
     while i < 2000:
         i += 1
         data3 = random_string(10)
         proxy.set(key3, data3)
         self.assertEqual(proxy.get(key3), data3)
         #            time.sleep(0.1)
         data3_ = store4.get(key3)
         if data3_ is None:
             print "store4 get nothing yet, round=", i
         else:
             print "fallbacked to store4 after %s tries" % (i)
             fallbacked = True
             self.assertEqual(data3_, data3)
             break
     ts_stop = time.time()
     if not fallbacked:
         self.fail("still not fallback to backend 4")
     print "%s seconds passed" % (ts_stop - ts_start)
     self.backend1.start()
     self.assert_(proxy.exists("key3"))
     store1 = MCStore(self.backend1_addr)
     self.assert_(store1.get("key3") is None)
     data3 = random_string(10)
     ts_recover_start = time.time()
     i = 0
     recovered = False
     while i < 2000:
         #data3 = random_string(10)
         i += 1
         #            time.sleep(0.1)
         proxy.set(key3, data3)
         self.assertEqual(proxy.get(key3), data3)
         data3_ = store1.get(key3)
         if data3_ is None:
             print "store1 get nothing yet, round=", i
         else:
             print "recover to store1 after %s tries, %s sec" % (
                 i, time.time() - ts_recover_start)
             recovered = True
             self.assertEqual(data3_, data3)
             break
     if not recovered:
         self.fail("still not fallback to backend 1")
Example No. 2
 def test_head_object_if_match(self):
     size = 1024 * 256
     self.assert_head_bucket_result(result=self.s3.head_bucket(
         Bucket=env.BUCKET))
     key = KEY_PREFIX + random_string(16)
     body = random_bytes(size)
     expect_md5 = compute_md5(body)
     self.assert_put_object_result(result=self.s3.put_object(
         Bucket=env.BUCKET, Key=key, Body=body),
                                   etag=expect_md5)
     self.assert_head_object_result(result=self.s3.head_object(
         Bucket=env.BUCKET, Key=key, IfMatch=expect_md5),
                                    etag=expect_md5,
                                    content_length=size)
     try:
         fake_etag = '1b2cf535f27731c974343645a3985328'
         self.s3.head_object(Bucket=env.BUCKET, Key=key, IfMatch=fake_etag)
         self.fail()  # Succeeding without an exception here is a failure.
     except Exception as e:
         # A 412 Precondition Failed error is expected.
         self.assert_client_error(error=e, expect_status_code=412)
     self.assert_delete_object_result(
         result=self.s3.delete_object(Bucket=env.BUCKET, Key=key))
     self.assert_delete_object_result(
         result=self.s3.delete_object(Bucket=env.BUCKET, Key=KEY_PREFIX))
Example No. 3
 def test_head_object_if_modified_since(self):
     size = 1024 * 256
     self.assert_head_bucket_result(result=self.s3.head_bucket(
         Bucket=env.BUCKET))
     key = KEY_PREFIX + random_string(16)
     body = random_bytes(size)
     expect_md5 = compute_md5(body)
     self.assert_put_object_result(result=self.s3.put_object(
         Bucket=env.BUCKET, Key=key, Body=body),
                                   etag=expect_md5)
     self.assert_head_object_result(result=self.s3.head_object(
         Bucket=env.BUCKET,
         Key=key,
         IfModifiedSince=datetime.datetime(1946, 2, 14)),
                                    etag=expect_md5,
                                    content_length=size)
     try:
         self.s3.head_object(Bucket=env.BUCKET,
                             Key=key,
                             IfModifiedSince=datetime.datetime.now())
         self.fail()  # Succeeding without an exception here is a failure.
     except Exception as e:
         # A 304 Not Modified error is expected.
         self.assert_client_error(error=e, expect_status_code=304)
     self.assert_delete_object_result(
         result=self.s3.delete_object(Bucket=env.BUCKET, Key=key))
     self.assert_delete_object_result(
         result=self.s3.delete_object(Bucket=env.BUCKET, Key=KEY_PREFIX))
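
The assert_client_error helper is local to these tests; with plain botocore the same check can be written against ClientError directly (a sketch using standard botocore behavior):

    from botocore.exceptions import ClientError

    try:
        self.s3.head_object(Bucket=env.BUCKET, Key=key,
                            IfModifiedSince=datetime.datetime.now())
    except ClientError as e:
        # botocore exposes the HTTP status of the failed call here
        status = e.response['ResponseMetadata']['HTTPStatusCode']
        assert status == 304  # Not Modified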
Example No. 4
 def test_put_object_conflict_scene2(self):
     """
     This test tests response when target key exists but file mode conflict with expect.
     :return:
     """
     key = KEY_PREFIX + random_string(16)
     empty_content_etag = 'd41d8cd98f00b204e9800998ecf8427e'
     self.assert_put_object_result(result=self.s3.put_object(Bucket=BUCKET,
                                                             Key=key),
                                   etag=empty_content_etag)
     self.assert_head_object_result(result=self.s3.head_object(
         Bucket=BUCKET, Key=key),
                                    etag=empty_content_etag)
     try:
         directory_mime = 'application/directory'
         self.s3.put_object(Bucket=BUCKET,
                            Key=key,
                            ContentType=directory_mime)
         self.fail()
     except Exception as e:
         self.assert_client_error(error=e, expect_status_code=409)
     finally:
         self.assert_delete_objects_result(result=self.s3.delete_objects(
             Bucket=BUCKET,
             Delete={'Objects': [{
                 'Key': key
             }, {
                 'Key': KEY_PREFIX
             }]}))
Example No. 5
    def test1(self):
        data1 = random_string(10)
        time.sleep(1)

        print "test normal write"
        proxy = BeansDBProxy([self.proxy_addr])
        proxy.delete('key1')
        proxy.set('key1', data1)
        self._assert_data(self.backend1_addr, 'key1', data1)
        self._assert_data(self.backend2_addr, 'key1', data1)
        self._assert_data(self.backend3_addr, 'key1', data1)
        self.assert_(MCStore(self.backend2_addr).exists('key1'))

        cmd = "ls -l /proc/%s/fd" % (self.proxy_p.pid)
        print cmd
        print subprocess.check_output(cmd, shell=True)

        print "move log"
        if os.path.exists(self.accesslog_bak):
            os.remove(self.accesslog_bak)
        if os.path.exists(self.errorlog_bak):
            os.remove(self.errorlog_bak)
        os.rename(self.accesslog, self.accesslog_bak)
        os.rename(self.errorlog, self.errorlog_bak)
        print "write more data to see if new log not exists"
        data1 = random_string(10)
        proxy.set('key1', data1)
        self.assert_(not os.path.exists(self.accesslog))
        self.assert_(not os.path.exists(self.errorlog))

        time.sleep(5)

        print "send SIGINT signal, should re-open log file" 
        os.kill(self.proxy_p.pid, signal.SIGINT)

        cmd = "ls -l /proc/%s/fd" % (self.proxy_p.pid)
        print subprocess.check_output(cmd, shell=True)

        self.assert_(os.path.exists(self.accesslog))
        self.assert_(os.path.exists(self.errorlog))
        s = os.stat(self.accesslog)
        print "see if write to new accesslog"
        proxy.get('key1')
        time.sleep(1)
        s_new = os.stat(self.accesslog)
        print s_new.st_size, s.st_size
        self.assert_(s_new.st_size > s.st_size)
Example No. 6
    def add_node(self,
                 u_of_edge,
                 shape,
                 names,
                 initializer=tf.glorot_normal_initializer(),
                 regularizer=None,
                 shared=None,
                 collections=None):
        """ Creates a node with dangling edges defined by shape
            Internally, these creates dummy nodes on these dangling edges

            :param u_of_edge: Name of the node e.g. "A"
            :param shape: Dimensions of the exposed indices
            :param names: Name of the open indices e.g. "W" for width
            :param initializer: Initialization strategy
            :param regularizer: If a regularization term, for example L2 norm, weight decay
            :param shared: (boolean) If the weight is shared across layers
            :param collections: Used if you want to group tensorflow variables
            :param shared: When creating the tensorflow variable, ignore the Graph name scope
        """

        if self._is_compiled:
            raise Exception(
                "Unable to add more edge/nodes once the graph is compiled")

        assert len(shape) == len(names), "Must have a name for each open index"

        if not self._graph.has_node(u_of_edge):
            # TODO: How can we integrate shared property (share weights across layers)
            self._graph.add_node(u_of_edge,
                                 dummy_node=False,
                                 initializer=initializer,
                                 regularizer=regularizer,
                                 shared=shared,
                                 collections=collections
                                 )  # Make it possible to share (shared=shared)

        # Create a dummy node for each of the exposed indices
        dummy_node_names = []
        for i in range(len(shape)):
            dummy_node_names.append(random_string())
            self._graph.add_node(dummy_node_names[i],
                                 dummy_node=True,
                                 initializer=None,
                                 regularizer=None,
                                 shared=None,
                                 collections=None)

        # Now connect to the dummy nodes
        for i in range(len(shape)):
            self._graph.add_edge(u_of_edge,
                                 dummy_node_names[i],
                                 weight=shape[i],
                                 name=names[i])

        # So can chain operations
        return self
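
A hypothetical usage sketch of add_node (the Graph construction around it is assumed for illustration; only the add_node signature comes from the source):

    # create a weight node "A" with open indices W (size 4) and H (size 5),
    # then chain a second node, since add_node returns self
    g = Graph()
    g.add_node("A", shape=[4, 5], names=["W", "H"]) \
     .add_node("B", shape=[5], names=["C"])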
Example No. 7
 def run():
     key = KEY_PREFIX + random_string(16)
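     # keys are lowercased up front: S3 user-metadata header names are
     # case-insensitive and come back from head_object in lowercase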
     metadata = {
         random_string(8).lower(): random_string(16),
         random_string(8).lower(): random_string(16)
     }
     self.assert_put_object_result(result=self.s3.put_object(
         Bucket=BUCKET, Key=key, Metadata=metadata))
     self.assert_head_object_result(result=self.s3.head_object(
         Bucket=BUCKET, Key=key),
                                    metadata=metadata)
     self.assert_delete_objects_result(result=self.s3.delete_objects(
         Bucket=BUCKET,
         Delete={'Objects': [{
             'Key': key
         }, {
             'Key': KEY_PREFIX
         }]}))
Example No. 8
    def test_object_tagging(self):

        key = KEY_PREFIX + random_string(16)
        init_tag_set = generate_tag_set()
        result = self.s3.put_object(Bucket=env.BUCKET, Key=key, Tagging=encode_tag_set(init_tag_set))
        self.assert_put_object_result(result=result)
        etag = result['ETag']

        self.assert_head_object_result(
            result=self.s3.head_object(Bucket=env.BUCKET, Key=key),
            etag=etag,
            content_length=0)

        self.assert_get_tagging_result(
            result=self.s3.get_object_tagging(Bucket=env.BUCKET, Key=key),
            expect_tag_set=init_tag_set)

        def run():
            tag_set = generate_tag_set(size=4, key_length=8, value_length=16)

            self.assert_put_tagging_result(
                result=self.s3.put_object_tagging(
                    Bucket=env.BUCKET,
                    Key=key,
                    Tagging={'TagSet': tag_set}))

            self.assert_get_tagging_result(
                result=self.s3.get_object_tagging(Bucket=env.BUCKET, Key=key),
                expect_tag_set=tag_set)

            self.assert_delete_tagging_result(
                result=self.s3.delete_object_tagging(Bucket=env.BUCKET, Key=key))

            self.assert_get_tagging_result(
                result=self.s3.get_object_tagging(Bucket=env.BUCKET, Key=key),
                expect_tag_set=empty_tag_set)

        test_count = 50
        for _ in range(test_count):
            run()

        # Clean up test data
        self.assert_delete_objects_result(
            result=self.s3.delete_objects(
                Bucket=env.BUCKET,
                Delete={
                    'Objects': [
                        {'Key': key},
                        {'Key': KEY_PREFIX}
                    ]
                }
            )
        )
Example No. 9
    def test2(self):
        data1 = random_string(10)
        data2 = random_string(10)
        time.sleep(1)

        print "test normal write"
        proxy = BeansDBProxy([self.proxy_addr])
        proxy.delete('key1')
        proxy.set('key1', data1)
        self._assert_data(self.backend1_addr, 'key1', data1)
        self._assert_data(self.backend2_addr, 'key1', data1)
        self._assert_data(self.backend3_addr, 'key1', data1)
        self._assert_data(self.backend4_addr, 'key1', None,
                          "temporary node should not have the key when all primary nodes are healthy")

        proxy.delete('key2')
        print "down backend1 and backend2, proxy.get should be ok"
        self.backend1.stop()
        self.backend2.stop()
        proxy.set('key2', data2)
        self.assertEqual(proxy.get('key2'), data2)
        self._assert_data(self.proxy_addr, 'key2', data2)
        with self.assertRaises(Exception):
            MCStore(self.backend1_addr).get('key2')
        with self.assertRaises(Exception):
            MCStore(self.backend2_addr).get('key2')
        self._assert_data(self.backend3_addr, 'key2', data2)
        #"temporary node should have the key when primary nodes < 2"
        self._assert_data(self.backend4_addr, 'key2', data2)
        print "test delete under bad sistuation, will raise error according to current behavior"
        with self.assertRaises(Exception) as exc:
            proxy.delete('key2')
        self._assert_data(self.backend3_addr, 'key2', None)
        self._assert_data(self.backend4_addr, 'key2', None)

        
        print "start backend2, (backend1 still down), test delete"
        self.backend2.start()
        time.sleep(10)
        proxy.delete('key2')
        self._assert_data(self.proxy_addr, 'key2', None)
        self._assert_data(self.backend2_addr, 'key2', None)
        self._assert_data(self.backend3_addr, 'key2', None)
        self._assert_data(self.backend4_addr, 'key2', None)
Example No. 10
    def test1(self):
        data1 = random_string(10)
        data2 = random_string(10)
        time.sleep(1)

        print "test normal write"
        proxy = BeansDBProxy([self.proxy_addr])
        proxy.delete('key1')
        proxy.set('key1', data1)
        self._assert_data(self.backend1_addr, 'key1', data1)
        self._assert_data(self.backend2_addr, 'key1', data1)
        self._assert_data(self.backend3_addr, 'key1', data1)
        self.assert_(MCStore(self.backend2_addr).exists('key1'))

        print "down backend2, proxy.get should be ok"
        self.backend2.stop()
        proxy.delete('key2')
        self.assert_(not proxy.exists('key2'))
        self.assert_(not MCStore(self.backend1_addr).exists('key2'))
        self.assert_(not MCStore(self.backend3_addr).exists('key2'))
        proxy.set('key2', data2)
        self.assertEqual(proxy.get('key2'), data2)

        self.assert_(proxy.exists('key2'))
        self.assert_(MCStore(self.backend3_addr).exists('key2'))
        self.assert_(MCStore(self.backend1_addr).exists('key2'))
        self._assert_data(self.proxy_addr, 'key2', data2)
        self._assert_data(self.backend1_addr, 'key2', data2)
        with self.assertRaises(Exception):
            MCStore(self.backend2_addr).get('key2')

        self._assert_data(self.backend3_addr, 'key2', data2)

        print "down backend1, proxy.get/set should fail"
        self.backend1.stop()
        self.assertEqual(proxy.get('key1'), data1)
        with self.assertRaises(Exception):
            MCStore(self.backend1_addr).get('key2')
        with self.assertRaises(Exception):
            MCStore(self.backend2_addr).get('key2')
        with self.assertRaises(Exception):
            proxy.set('key2', data2)
Example No. 11
 def __do_test_put_objects_independent(self, file_size, file_num):
     """
     Put multiple objects with different object keys.
     Process:
     :type file_size: int
     :type file_num: int
     :param file_size: numeric value, size of file (unit: byte)
     :param file_num: numeric value, number of file to put
     :return:
     """
     file_names = []
     file_name_prefix = random_string(16)
     for i in range(file_num):
         file_name = "%s_%d" % (file_name_prefix, i)
         self.__do_test_put_objects(file_name=file_name,
                                    file_size=file_size,
                                    file_num=1)
         file_names.append(file_name)
     # check files
     matches = 0
     marker = ''
     truncated = True
     while truncated:
         response = self.s3.list_objects(Bucket=BUCKET,
                                         Prefix=file_name_prefix,
                                         Marker=marker,
                                         MaxKeys=100)
         self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'],
                          200)
         if 'Contents' in response:
             contents = response['Contents']
             self.assertIsInstance(contents, list)
             matches = matches + len(contents)
         if 'NextMarker' in response:
             next_marker = response['NextMarker']
             if next_marker != '':
                 marker = next_marker
         if 'IsTruncated' in response:
             truncated = bool(response['IsTruncated'])
         else:
             truncated = False
     self.assertEqual(matches, file_num)
     # batch delete objects
     objects = []
     for file_name in file_names:
         objects.append({"Key": file_name})
     delete = {"Objects": objects}
     self.assert_delete_objects_result(
         result=self.s3.delete_objects(Bucket=BUCKET, Delete=delete))
     # check deletion result
     response = self.s3.list_objects(Bucket=BUCKET, Prefix=file_name_prefix)
     self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
     self.assertFalse('Contents' in response)
Example No. 12
    def test_list_object_v2_etag(self):
        file_num = 40
        files = {}  # key -> etag
        for _ in range(file_num):
            key = KEY_PREFIX + random_string(16)
            result = self.s3.put_object(Bucket=env.BUCKET,
                                        Key=key,
                                        Body=random_bytes(16))
            self.assert_put_object_result(result=result)
            files[key] = result['ETag'].strip('"')

        # validate list result
        contents = []
        continuation_token = ''
        truncated = True
        while truncated:
            result = self.s3.list_objects_v2(
                Bucket=env.BUCKET,
                Prefix=KEY_PREFIX,
                ContinuationToken=continuation_token,
                MaxKeys=30)
            self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
            if 'Contents' in result:
                result_contents = result['Contents']
                self.assertIsInstance(result_contents, list)
                contents = contents + result_contents
            if 'NextContinuationToken' in result:
                next_token = result['NextContinuationToken']
                if next_token != '':
                    continuation_token = next_token
            if 'IsTruncated' in result:
                truncated = bool(result['IsTruncated'])
            else:
                truncated = False

        for content in contents:
            key = content['Key']
            etag = content['ETag'].strip('"')
            if not key.endswith('/'):
                # validate etag with source
                self.assertEqual(etag, files[key])
                # validate etag with head result
                self.assert_head_object_result(self.s3.head_object(
                    Bucket=env.BUCKET, Key=key),
                                               etag=etag)

        # clean up test data
        objects = []
        for content in contents:
            objects.append({'Key': content['Key']})
        self.s3.delete_objects(Bucket=env.BUCKET, Delete={'Objects': objects})
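
The manual ContinuationToken/Marker loops in these listing tests can also be written with boto3's built-in paginators, which advance the token internally; a minimal sketch using the same bucket and prefix:

    paginator = self.s3.get_paginator('list_objects_v2')
    contents = []
    for page in paginator.paginate(Bucket=env.BUCKET, Prefix=KEY_PREFIX):
        contents.extend(page.get('Contents', []))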
Example No. 13
    def test_iptable_silence(self):
        """ test wether a node slow will affect response time """
        proxy = BeansDBProxy([self.proxy_addr])
        key3 = 'key3'
        start_time = time.time()
        for i in range(20000):
            data3 = random_string(10)
            proxy.set(key3, data3)
            self.assertEqual(proxy.get(key3), data3)
            self.assertEqual(proxy.get(key3), data3)
        print "avg get&set time", (time.time() - start_time) / 2000

        self._iptable_block(self.backend1_addr)
        start_time = time.time()
        for i in range(20000):
            data3 = random_string(10)
            proxy.set(key3, data3)
            self.assertEqual(proxy.get(key3), data3)
            self.assertEqual(proxy.get(key3), data3)
        print "avg get&set time", (time.time() - start_time) / 2000
        self._iptable_unblock(self.backend1_addr)
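
The _iptable_block/_iptable_unblock helpers are not shown in the listing; a sketch of what they might look like on the test base class (an assumption, including the port parsing):

    import subprocess

    def _iptable_block(self, addr):
        # drop inbound TCP traffic to the backend's port
        port = addr.split(':')[1]
        subprocess.check_call(
            "iptables -A INPUT -p tcp --dport %s -j DROP" % port, shell=True)

    def _iptable_unblock(self, addr):
        # remove the DROP rule added by _iptable_block
        port = addr.split(':')[1]
        subprocess.check_call(
            "iptables -D INPUT -p tcp --dport %s -j DROP" % port, shell=True)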
Example No. 14
    def test_big_value(self):
        self.backend1.start()
        store = MCStore(self.backend1_addr)

        key = "largekey"
        size = 10 * 1024 * 1024
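        # rsize: records in the data file are padded to 256-byte boundaries;
        # the extra 24 bytes are assumed to be per-record header overhead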
        rsize = (((size + len(key) + 24) >> 8) + 1) << 8
        string_large = random_string(size)
        assert(store.set(key, string_large))
        assert(store.get(key) == string_large)
        self.append(rsize)
        self.assertEqual(self._get_meta(store, key), (1, 0, self.last_pos))

        assert(store.set(key, 'aaa'))
        self.append(256)
        self.assertEqual(self._get_meta(store, key), (2, 0, self.last_pos))
Example No. 15
 def __do_test_put_objects_override(self, file_size, file_num):
     """
     Put and override the same file multiple times.
     Process:
     1. Put and override file.
     2. Delete file created by this process.
     :type file_size: int
     :type file_num: int
     :param file_size: numeric value, size of the file (unit: byte)
     :param file_num: numeric value, number of times to put (override) the file
     :return: None
     """
     file_name = random_string(16)
     self.__do_test_put_objects(file_name=file_name,
                                file_size=file_size,
                                file_num=file_num)
     # delete object
     self.assert_delete_object_result(
         result=self.s3.delete_object(Bucket=BUCKET, Key=file_name))
Example No. 16
    def __test_transfer(self, size):
        name = random_string(16)
        key = KEY_PREFIX + name
        local_filename = os.path.join('/tmp', name)
        expect_md5 = generate_file(path=local_filename, size=size)

        # Upload in parallel
        f = open(local_filename, 'rb')
        future = self.tm.upload(fileobj=f, bucket=BUCKET, key=key)
        result = wait_future_done(future, timeout=90)
        self.assertTrue(result)
        f.close()

        # Checking remote file stat
        self.assert_head_object_result(result=self.s3.head_object(
            Bucket=BUCKET, Key=key),
                                       content_length=size)

        # Download in parallel
        download_filename = local_filename + "_dl"
        f = open(download_filename, 'wb+')
        future = self.tm.download(fileobj=f, bucket=BUCKET, key=key)
        result = wait_future_done(future, timeout=90)
        self.assertTrue(result)
        f.flush()

        # Checking download file
        f.seek(0)
        actual_md5 = compute_md5(f.read())
        f.close()
        self.assertEqual(actual_md5, expect_md5)

        # Remove remote and local files
        os.remove(local_filename)
        os.remove(download_filename)
        self.assert_delete_object_result(
            result=self.s3.delete_object(Bucket=BUCKET, Key=key))
        self.assert_delete_object_result(
            result=self.s3.delete_object(Bucket=BUCKET, Key=KEY_PREFIX))
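
wait_future_done is a local helper; with s3transfer's TransferManager one can also block on the returned future directly (a sketch of standard s3transfer behavior):

    future = self.tm.upload(fileobj=f, bucket=BUCKET, key=key)
    future.result()  # blocks until the transfer finishes, re-raising any transfer error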
Example No. 17
 def test_head_object(self):
     size = 1024 * 256
     self.assert_head_bucket_result(self.s3.head_bucket(Bucket=env.BUCKET))
     key = KEY_PREFIX + random_string(16)
     body = random_bytes(size)
     expect_md5 = compute_md5(body)
     self.assert_put_object_result(result=self.s3.put_object(
         Bucket=env.BUCKET, Key=key, Body=body),
                                   etag=expect_md5)
     self.assert_head_object_result(result=self.s3.head_object(
         Bucket=env.BUCKET, Key=key),
                                    etag=expect_md5,
                                    content_length=size)
     self.assert_delete_object_result(
         result=self.s3.delete_object(Bucket=env.BUCKET, Key=key))
     self.assert_delete_object_result(
         result=self.s3.delete_object(Bucket=env.BUCKET, Key=KEY_PREFIX))
     try:
         self.s3.head_object(Bucket=env.BUCKET, Key=key)
         self.fail()  # Succeeding without an exception here is a failure.
     except Exception as e:
         # A 404 Not Found error is expected.
         self.assert_client_error(e, expect_status_code=404)
Example No. 18
    def __init__(self, case):
        super(ObjectGetRangeTest, self).__init__(case)
        self.s3 = get_env_s3_client()
        self.file_size = 10000
        self.file_key = KEY_PREFIX + random_string(16)
        self.test_cases = [
            {"range": "bytes=0-499", "status_code": 206, "content-range": "bytes 0-499/10000", "content-length": 500},
            {"range": "bytes=500-999", "status_code": 206, "content-range": "bytes 500-999/10000", "content-length": 500},
            {"range": "bytes=9500-", "status_code": 206, "content-range": "bytes 9500-9999/10000", "content-length": 500},
            {"range": "bytes=0-", "status_code": 206, "content-range": "bytes 0-9999/10000", "content-length": 10000},
            {"range": "bytes=0-0", "status_code": 206, "content-range": "bytes 0-0/10000", "content-length": 1},
            {"range": "bytes=-500", "status_code": 206, "content-range": "bytes 9500-9999/10000", "content-length": 500},
            {"range": "bytes=-1", "status_code": 206, "content-range": "bytes 9999-9999/10000", "content-length": 1},
            {"range": "bytes=-0", "status_code": 206, "content-range": "bytes 0-9999/10000", "content-length": 10000},
            {"range": "bytes=1-0", "status_code": 416},
            {"range": "bytes=10", "status_code": 416},
            {"range": "bytes=", "status_code": 416},
            {"range": "bytes=abc", "status_code": 416},
            {"range": "bytes=abc-123", "status_code": 416},
            {"range": "1-0", "status_code": 416},
        ]

        self._init_object()
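
_init_object is not shown in the listing. A hypothetical driver for the cases above (the run_cases name and the exact assertions are assumptions for illustration):

    def run_cases(self):
        for case in self.test_cases:
            try:
                result = self.s3.get_object(Bucket=env.BUCKET,
                                            Key=self.file_key,
                                            Range=case["range"])
                # satisfiable ranges return 206 Partial Content
                self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'],
                                 case["status_code"])
            except Exception as e:
                # unsatisfiable ranges surface as a 416 client error
                self.assert_client_error(error=e,
                                         expect_status_code=case["status_code"])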
Example No. 19
 def test_put_directory(self):
     key = random_string(16)
     content_type = 'application/directory'
     md5 = compute_md5(bytes())
     # Put a directory object
     self.assert_put_object_result(result=self.s3.put_object(
         Bucket=BUCKET, Key=key, ContentType=content_type),
                                   etag=md5)
     # Get the directory info
     self.assert_head_object_result(result=self.s3.head_object(
         Bucket=BUCKET, Key=key + '/'),
                                    etag=md5,
                                    content_type=content_type,
                                    content_length=0)
     # Get directory object
     self.assert_get_object_result(result=self.s3.get_object(Bucket=BUCKET,
                                                             Key=key + '/'),
                                   etag=md5,
                                   content_type=content_type,
                                   content_length=0,
                                   body_md5=md5)
     # Delete the directory
     self.assert_delete_object_result(
         result=self.s3.delete_object(Bucket=BUCKET, Key=key + '/'))
Example No. 20
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.

# -*- coding: utf-8 -*-

import env
from base import S3TestCase, get_env_s3_client
from base import random_string

KEY_PREFIX = 'test-tagging-%s/' % random_string(8)

empty_tag_set = []


def generate_tag_set(size=10, key_length=8, value_length=16):
    tag_set = []
    for _ in range(size):
        tag_set.append({'Key': random_string(key_length), 'Value': random_string(value_length)})
    return tag_set


def encode_tag_set(tag_set):
    encoded = ''
    for tag in tag_set:
        if len(encoded) > 0:
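
The listing cuts encode_tag_set off here. Since put_object's Tagging parameter is a URL-encoded query string ("Key1=Value1&Key2=Value2"), a plausible completion is the following sketch (an assumption, not the source's code):

def encode_tag_set(tag_set):
    encoded = ''
    for tag in tag_set:
        if len(encoded) > 0:
            encoded += '&'
        # keys and values here are plain alphanumeric random strings, so this
        # sketch skips additional URL escaping
        encoded += tag['Key'] + '=' + tag['Value']
    return encoded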
Example No. 21
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.

# -*- coding: utf-8 -*-
import os
from boto3.s3.transfer import TransferManager, TransferConfig

from base import S3TestCase
from base import random_string, compute_md5, generate_file, wait_future_done, get_env_s3_client
from env import BUCKET

KEY_PREFIX = 'test-transfer-%s/' % random_string(8)


class TransferTest(S3TestCase):
    def __init__(self, case):
        super(TransferTest, self).__init__(case)
        self.s3 = get_env_s3_client()
        tc = TransferConfig(multipart_threshold=5 * 1024 * 1024,
                            max_concurrency=10,
                            multipart_chunksize=5 * 1024 * 1024,
                            num_download_attempts=5,
                            max_io_queue=100,
                            io_chunksize=262144,
                            use_threads=True)
        self.tm = TransferManager(self.s3, tc)
Example No. 22
#!/usr/bin/env python
# coding:utf-8

import os
import sys
import time
from base import BeansdbInstance, TestBeansdbBase, MCStore
from base import check_data_hint_integrity, delete_hint_and_htree, random_string
import unittest

string_large = random_string(10*1024*1024)


class TestGenerateData(TestBeansdbBase):

    proxy_addr = 'localhost:7905'
    backend1_addr = 'localhost:57901'

    def setUp(self):
        self._clear_dir()
        self._init_dir()
        self.backend1 = BeansdbInstance(self.data_base_path, 57901)

    def test_gen_data(self):
        self.backend1.start()
        store = MCStore(self.backend1_addr)
        self.assert_(store.set("largekey", string_large))
        self.assert_(store.get("largekey") == string_large)

        loop_num = 16 * 1024
        for i in xrange(loop_num):
Example No. 23
    def test_list_object_page_v2(self):

        self.clear_data()

        time.sleep(5)

        files = {}  # key -> etag

        file_keys = []
        prefix_keys = {}
        for test_file in self.test_files:
            for _ in range(test_file.get('file_num', 1)):
                #key = KEY_PREFIX + random_string(16)
                prefix = test_file['prefix']
                key = prefix + test_file['name'] + random_string(16)
                if not prefix_keys.get(prefix):
                    prefix_keys[prefix] = []
                prefix_keys.get(prefix).append(key)
                test_file['key'] = key
                file_keys.append({'Key': key})
                result = self.s3.put_object(Bucket=env.BUCKET, Key=key, Body=random_bytes(16))
                self.assert_put_object_result(result=result)
                files[key] = result['ETag'].strip('"')
                #print("put object key: {}".format(key))

        file_keys = sorted(file_keys, key=lambda f: f['Key'])
        last_key = file_keys[-1].get('Key')

        prefixes = []
        for f in self.test_files:
            if f.get('prefix') not in prefixes:
                prefixes.append(f.get('prefix'))

        # validate list result
        for prefix in prefixes:
            prefix_keys = [ f['Key'] for f in file_keys if f['Key'].startswith(prefix) ]
            contents = []
            continuation_token = ''
            last_marker = ''
            truncated = True
            while truncated:
                result = self.s3.list_objects_v2(
                    Bucket=env.BUCKET,
                    Prefix=prefix,
                    ContinuationToken=continuation_token,
                    MaxKeys=self.max_page_size)
                self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
                if 'Contents' in result:
                    result_contents = result['Contents']
                    self.assertIsInstance(result_contents, list)
                    contents = contents + result_contents
                if 'NextContinuationToken' in result:
                    next_token = result['NextContinuationToken']
                    if next_token != '':
                        last_marker = continuation_token
                        continuation_token = next_token
                if 'IsTruncated' in result:
                    truncated = bool(result['IsTruncated'])
                else:
                    truncated = False
                if truncated and last_marker != '':
                    #print("list object truncated: last_marker {} page_first_key: {} ".format(last_marker, result_contents[0]['Key'] ))
                    self.assertEqual(last_marker, result_contents[0]['Key'])

            # if prefix_keys[-1] != contents[-1]['Key']:
            #     print("{} {}", prefix_keys[-1], contents[-1]['Key'])
            self.assertEqual(prefix_keys[-1], contents[-1]['Key'])

        truncated = True
        prefix=""
        continuation_token=""
        file_count = 0
        while truncated:
            result = self.s3.list_objects_v2(
                Bucket=env.BUCKET,
                Prefix=prefix,
                ContinuationToken=continuation_token,
                MaxKeys=self.max_page_size)
            truncated = bool(result.get('IsTruncated', False))
            continuation_token=result.get('NextContinuationToken', "")
            for content in result.get('Contents', {}):
                if not content.get('Key', "").endswith("/"):
                    file_count += 1
        self.assertEqual(file_count, len(file_keys))

        self.s3.delete_objects(
            Bucket=env.BUCKET,
            Delete={'Objects': file_keys}
        )
Example No. 24
    def test_list_object_page_v1(self):
        '''
        test list-objects (v1) paging
        '''
        files = {}  # key -> etag

        self.clear_data()

        time.sleep(5)

        file_keys = []
        prefix_keys = {}
        for test_file in self.test_files:
            for _ in range(test_file.get('file_num', 1)):
                #key = KEY_PREFIX + random_string(16)
                prefix = test_file['prefix']
                key = prefix + test_file['name'] + random_string(16)
                if not prefix_keys.get(prefix):
                    prefix_keys[prefix] = []
                prefix_keys.get(prefix).append(key)
                test_file['key'] = key
                file_keys.append({'Key': key})
                result = self.s3.put_object(Bucket=env.BUCKET, Key=key, Body=random_bytes(16))
                self.assert_put_object_result(result=result)
                files[key] = result['ETag'].strip('"')
                #print("put object key: {}".format(key))

        file_keys = sorted(file_keys, key=lambda f: f['Key'])
        last_key = file_keys[-1].get('Key')

        # print("put object key count: {}, last_key: {} ".format(len(file_keys), last_key ))

        prefixes = []
        for f in self.test_files:
            if f.get('prefix') not in prefixes:
                prefixes.append(f.get('prefix'))

        for prefix in prefixes:
            # print("prefix_keys: {} {}".format(prefix, prefix_keys.get(prefix, [])))
            prefix_keys = [ f['Key'] for f in file_keys if f['Key'].startswith(prefix) ]
            marker = ''
            # validate list result
            contents = []
            truncated = True
            last_marker = ''
            while truncated:
                result_contents = []
                result = self.s3.list_objects(Bucket=env.BUCKET, Prefix=prefix, Marker=marker, MaxKeys=self.max_page_size)
                self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
                if 'Contents' in result:
                    result_contents = result['Contents']
                    #print("list response content type: {}".format(type(result_contents)))
                    # self.assertTrue(type(result_contents), list)
                    contents = contents + result_contents

                if 'NextMarker' in result:
                    next_marker = result['NextMarker']
                    if next_marker != '':
                        last_marker = marker
                        marker = next_marker

                if 'IsTruncated' in result:
                    truncated = bool(result['IsTruncated'])
                else:
                    truncated = False

                # print("list object count: {}, maxKey: {}, prefix: {}, first_key: {}, last_key: {}, next_marker: {}, truncate: {} ".format( \
                #      len(result_contents), self.max_page_size, prefix, result_contents[0]['Key'], result_contents[-1]['Key'], marker, truncated))

                if truncated and last_marker != '':
                    # print("list object truncated: last_marker {} page_first_key: {} ".format(last_marker, result_contents[0]['Key'] ))
                    self.assertEqual(last_marker, result_contents[0]['Key'])

            # print("list object total: {}, maxKey: {}, prefix: {}, first_key: {}, last_key: {}, next_marker: {} ".format( \
            #     len(contents), self.max_page_size, prefix, contents[0]['Key'], contents[-1]['Key'], marker))

            # if prefix_keys[-1] != contents[-1]['Key']:
            #     print("{} {}", prefix_keys[-1], contents[-1]['Key'])
            self.assertEqual(prefix_keys[-1], contents[-1]['Key'])

        truncated = True
        prefix=""
        marker=""
        file_count = 0
        while truncated:
            # print("marker: {}".format(marker))
            result = self.s3.list_objects(Bucket=env.BUCKET, Prefix=prefix, Marker=marker, MaxKeys=self.max_page_size)
            truncated = bool(result.get('IsTruncated', False))
            marker=result.get('NextMarker', "")
            for content in result.get('Contents', {}):
                # print("file: {}".format(content.get('Key')))
                if not content.get('Key', "").endswith("/"):
                    file_count += 1
            # print("next_marker: {} {}".format(truncated, marker))
        self.assertEqual(file_count, len(file_keys))

        self.s3.delete_objects(
            Bucket=env.BUCKET,
            Delete={'Objects': file_keys}
        )
Example No. 25
import os
import sys
import time
from base import BeansdbInstance, TestBeansdbBase, MCStore
from base import get_hash, check_data_hint_integrity
from base import random_string, delete_hint_and_htree, temper_with_key_value
import unittest
import telnetlib
import glob
import quicklz
import struct
import re
from gc_simple import TestGCBase

string_large = random_string(10 * 1024 * 1024)


class TestBrokenBase(TestGCBase):
    proxy_addr = 'localhost:7905'
    backend1_addr = 'localhost:57901'

    #
    def setUp(self):
        self._clear_dir()
        self._init_dir()
        self.backend1 = BeansdbInstance(self.data_base_path, 57901, db_depth=1)

    # only generate keys in sector0
    def _gen_data(self, data, prefix='', loop_num=10 * 1024, sector=0):
        store = MCStore(self.backend1_addr)