Example #1
0
 def verify_persistence(servers,
                        test,
                        keys_count=400000,
                        timeout_in_seconds=300):
     """Start a background load on every bucket of the first server and
     assert that cluster-wide persistence verification succeeds."""
     log = logger.Logger.get_logger()
     master = servers[0]
     rest = RestConnection(master)
     log.info("Verifying Persistence")
     for bucket in rest.get_buckets():
         # Kick off background loader threads for this bucket while the
         # persistence check runs.
         loaders = MemcachedClientHelper.create_threads(
             [master], bucket.name, -1, keys_count,
             {1024: 0.50, 512: 0.50}, 2, -1, True, True)
         for loader in loaders:
             loader.start()
         # Persistence verification happens concurrently with the load.
         ready = ClusterOperationHelper.persistence_verification(
             servers, bucket.name, timeout_in_seconds)
         log.info("Persistence Verification returned ? {0}".format(ready))
         log.info("waiting for persistence threads to finish...")
         # Signal every loader to stop first, then join them all.
         for loader in loaders:
             loader.aborted = True
         for loader in loaders:
             loader.join()
         log.info("persistence thread has finished...")
         test.assertTrue(ready, msg="Cannot verify persistence")
Example #2
0
    def common_test_body(self, replica, load_ratio, timeout=10):
        """Create the default bucket, start a heavy async write load, then
        randomly rebalance nodes in and out for *timeout* minutes.

        :param replica: replica count passed to bucket creation
        :param load_ratio: only logged; the actual load size is fixed below
        :param timeout: overall test duration in minutes
        """
        log = logger.Logger.get_logger()
        start_time = time.time()
        log.info("replica : {0}".format(replica))
        log.info("load_ratio : {0}".format(load_ratio))
        master = self._servers[0]
        log.info('picking server : {0} as the master'.format(master))
        rest = RestConnection(master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        # Floor division: ramQuotaMB must be a whole number of megabytes
        # (plain / would produce a float under Python 3).
        bucket_ram = info.mcdMemoryReserved * 2 // 3
        rest.create_bucket(bucket='default',
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replica,
                           proxyPort=11211)
        json_bucket = {'name': 'default', 'port': 11211, 'password': ''}
        BucketOperationHelper.wait_for_memcached(master, json_bucket)
        log.info("inserting some items in the master before adding any nodes")
        distribution = {1024: 0.4, 2 * 1024: 0.5, 512: 0.1}
        threads = MemcachedClientHelper.create_threads(
            servers=[master],
            value_size_distribution=distribution,
            number_of_threads=len(self._servers),
            number_of_items=400000000,
            moxi=False,
            write_only=True,
            async_write=True)
        for thread in threads:
            thread.terminate_in_minutes = 24 * 60
            thread.start()
        while time.time() < (start_time + 60 * timeout):
            # Re-add any nodes that were previously rebalanced out.
            nodes = rest.node_statuses()
            delta = len(self._servers) - len(nodes)
            if delta > 0:
                if delta > 1:
                    how_many_add = Random().randint(1, delta)
                else:
                    how_many_add = 1
                self.log.info("going to add {0} nodes".format(how_many_add))
                self.rebalance_in(how_many=how_many_add)
            else:
                self.log.info("all nodes already joined the cluster")
            time.sleep(240)
            RestHelper(rest).wait_for_replication(600)
            # Only rebalance out when most of the nodes are in the cluster.
            if len(nodes) >= (3.0 / 4.0 * len(self._servers)):
                nodes = rest.node_statuses()
                how_many_out = Random().randint(1, len(nodes) - 1)
                self.log.info("going to remove {0} nodes".format(how_many_out))
                self.rebalance_out(how_many=how_many_out)

        # Stop the loader threads before returning.
        for t in threads:
            t.aborted = True
            t.join()
    def common_test_body(self, replica, load_ratio, timeout=10):
        """Create the default bucket, start a heavy async write load, then
        randomly rebalance nodes in and out for *timeout* minutes.

        NOTE(review): this definition appears twice in the original file;
        the later one shadows the earlier — consider deduplicating.

        :param replica: replica count passed to bucket creation
        :param load_ratio: only logged; the actual load size is fixed below
        :param timeout: overall test duration in minutes
        """
        log = logger.Logger.get_logger()
        start_time = time.time()
        log.info("replica : {0}".format(replica))
        log.info("load_ratio : {0}".format(load_ratio))
        master = self._servers[0]
        log.info('picking server : {0} as the master'.format(master))
        rest = RestConnection(master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        # Floor division: ramQuotaMB must be a whole number of megabytes
        # (plain / would produce a float under Python 3).
        bucket_ram = info.mcdMemoryReserved * 2 // 3
        rest.create_bucket(bucket='default',
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replica,
                           proxyPort=11211)
        json_bucket = {'name': 'default', 'port': 11211, 'password': ''}
        BucketOperationHelper.wait_for_memcached(master, json_bucket)
        log.info("inserting some items in the master before adding any nodes")
        distribution = {1024: 0.4, 2 * 1024: 0.5, 512: 0.1}
        threads = MemcachedClientHelper.create_threads(
            servers=[master],
            value_size_distribution=distribution,
            number_of_threads=len(self._servers),
            number_of_items=400000000,
            moxi=False,
            write_only=True,
            async_write=True)
        for thread in threads:
            thread.terminate_in_minutes = 24 * 60
            thread.start()
        while time.time() < (start_time + 60 * timeout):
            # Re-add any nodes that were previously rebalanced out.
            nodes = rest.node_statuses()
            delta = len(self._servers) - len(nodes)
            if delta > 0:
                if delta > 1:
                    how_many_add = Random().randint(1, delta)
                else:
                    how_many_add = 1
                self.log.info("going to add {0} nodes".format(how_many_add))
                self.rebalance_in(how_many=how_many_add)
            else:
                self.log.info("all nodes already joined the cluster")
            time.sleep(240)
            RestHelper(rest).wait_for_replication(600)
            # Only rebalance out when most of the nodes are in the cluster.
            if len(nodes) >= (3.0 / 4.0 * len(self._servers)):
                nodes = rest.node_statuses()
                how_many_out = Random().randint(1, len(nodes) - 1)
                self.log.info("going to remove {0} nodes".format(how_many_out))
                self.rebalance_out(how_many=how_many_out)

        # Stop the loader threads before returning.
        for t in threads:
            t.aborted = True
            t.join()
Example #4
0
    def load_bucket_and_return_the_keys(servers=None,
                                        name='default',
                                        ram_load_ratio=-1,
                                        number_of_items=-1,
                                        value_size_distribution=None,
                                        number_of_threads=1,
                                        override_vBucketId=-1,
                                        write_only=False,
                                        moxi=True,
                                        delete_ratio=0,
                                        expiry_ratio=0):
        """Run loader threads to completion and return the lists of
        inserted and rejected keys accumulated across all threads."""
        log = logger.Logger.get_logger()
        workers = MemcachedClientHelper.create_threads(
            servers, name, ram_load_ratio, number_of_items,
            value_size_distribution, number_of_threads, override_vBucketId,
            write_only=write_only, moxi=moxi,
            delete_ratio=delete_ratio, expiry_ratio=expiry_ratio)

        # Start every worker, then block until they are all done.
        for worker in workers:
            worker.start()
        log.info("waiting for all worker thread to finish their work...")
        for worker in workers:
            worker.join()
        log.info("worker threads are done...")

        inserted_keys = []
        rejected_keys = []
        inserted_count = 0
        rejected_count = 0
        deleted_count = 0
        expired_count = 0
        # Aggregate per-thread statistics and key lists.
        for worker in workers:
            w_inserted, w_rejected = worker.keys_set()
            inserted_count += worker.inserted_keys_count()
            rejected_count += worker.rejected_keys_count()
            deleted_count += worker._delete_count
            expired_count += worker._expiry_count
            inserted_keys.extend(w_inserted)
            rejected_keys.extend(w_rejected)
        log.info("inserted keys count : {0} , rejected keys count : {1}"
                 .format(inserted_count, rejected_count))
        log.info("deleted keys count : {0} , expired keys count : {1}"
                 .format(deleted_count, expired_count))
        return inserted_keys, rejected_keys
Example #5
0
 def threads_for_buckets(rest, load_ratio, distribution, rebalanced_servers, bucket_data, delete_ratio=0,
                         expiry_ratio=0):
     """Create and start loader threads for every bucket, storing each
     bucket's thread list in bucket_data under the bucket name."""
     for bucket in rest.get_buckets():
         loaders = MemcachedClientHelper.create_threads(
             servers=rebalanced_servers,
             name=bucket.name,
             ram_load_ratio=load_ratio,
             value_size_distribution=distribution,
             number_of_threads=4,
             delete_ratio=delete_ratio,
             expiry_ratio=expiry_ratio)
         for loader in loaders:
             loader.start()
         bucket_data[bucket.name]["threads"] = loaders
     return bucket_data
    def load_bucket_and_return_the_keys(servers=None,
                                        name='default',
                                        ram_load_ratio=-1,
                                        number_of_items=-1,
                                        value_size_distribution=None,
                                        number_of_threads=1,
                                        override_vBucketId=-1,
                                        write_only=False,
                                        moxi=True,
                                        delete_ratio=0,
                                        expiry_ratio=0):
        """Spawn loader threads, wait for all of them to finish and return
        the combined (inserted_keys, rejected_keys) lists."""
        log = logger.Logger.get_logger()
        loaders = MemcachedClientHelper.create_threads(
            servers, name, ram_load_ratio, number_of_items,
            value_size_distribution, number_of_threads, override_vBucketId,
            write_only=write_only, moxi=moxi,
            delete_ratio=delete_ratio, expiry_ratio=expiry_ratio)

        # Launch all loaders, then wait for each to complete.
        for loader in loaders:
            loader.start()
        log.info("waiting for all worker thread to finish their work...")
        for loader in loaders:
            loader.join()
        log.info("worker threads are done...")

        inserted_keys = []
        rejected_keys = []
        inserted_count = 0
        rejected_count = 0
        deleted_count = 0
        expired_count = 0
        # Fold per-thread results into the aggregate counters and lists.
        for loader in loaders:
            got_in, got_out = loader.keys_set()
            inserted_count += loader.inserted_keys_count()
            rejected_count += loader.rejected_keys_count()
            deleted_count += loader._delete_count
            expired_count += loader._expiry_count
            inserted_keys.extend(got_in)
            rejected_keys.extend(got_out)
        log.info("inserted keys count : {0} , rejected keys count : {1}"
                 .format(inserted_count, rejected_count))
        log.info("deleted keys count : {0} , expired keys count : {1}"
                 .format(deleted_count, expired_count))
        return inserted_keys, rejected_keys
 def verify_persistence(servers, test, keys_count=400000, timeout_in_seconds=300):
     """Feed each bucket with a background load and assert that
     persistence verification passes for every bucket."""
     log = logger.Logger.get_logger()
     master = servers[0]
     rest = RestConnection(master)
     log.info("Verifying Persistence")
     for bucket in rest.get_buckets():
         # Background writers keep the bucket busy during verification.
         writers = MemcachedClientHelper.create_threads(
             [master], bucket.name, -1, keys_count,
             {1024: 0.50, 512: 0.50}, 2, -1, True, True)
         for writer in writers:
             writer.start()
         ready = ClusterOperationHelper.persistence_verification(
             servers, bucket.name, timeout_in_seconds)
         log.info("Persistence Verification returned ? {0}".format(ready))
         log.info("waiting for persistence threads to finish...")
         # Ask every writer to stop, then join them all.
         for writer in writers:
             writer.aborted = True
         for writer in writers:
             writer.join()
         log.info("persistence thread has finished...")
         test.assertTrue(ready, msg="Cannot verify persistence")