Example #1
 def start_access_phase(self, master):
     loaders = []
     rest = RestConnection(master)
     for bucket in rest.get_buckets():
         loader = dict()
         loader["mcsoda"] = LoadWithMcsoda(
             master,
             self.keys_count / 2,
             bucket=bucket.name,
             rest_password=master.rest_password,
             prefix=str(bucket.name),
             port=8091)
         loader["mcsoda"].cfg["ratio-sets"] = 0.8
         loader["mcsoda"].cfg["ratio-hot"] = 0.2
         loader["mcsoda"].cfg["ratio-creates"] = 0.5
         loader["mcsoda"].cfg["ratio-deletes"] = self.ratio_deletes
         loader["mcsoda"].cfg["ratio-expirations"] = self.ratio_expiry
         loader["mcsoda"].cfg["json"] = 0
         loader["thread"] = Thread(target=loader["mcsoda"].load_data,
                                   name='mcloader_' + bucket.name)
         loader["thread"].daemon = True
         loaders.append(loader)
     for loader in loaders:
         loader["thread"].start()
     return loaders
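
The access-phase loaders above run until they are stopped explicitly; the method only starts the daemon threads and returns the loader dictionaries. A minimal cleanup sketch, assuming those dictionaries and mirroring the shutdown sequence used in the final example below (the method name stop_loaders is illustrative, not part of the original suite):

 def stop_loaders(self, loaders):
     # Ask every mcsoda worker to stop, then wait for its daemon thread to exit.
     for loader in loaders:
         loader["mcsoda"].load_stop()
     for loader in loaders:
         loader["thread"].join()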
Example #2
 def start_load_phase(self, master):
     loaders = []
     rest = RestConnection(master)
     for bucket in rest.get_buckets():
         loader = dict()
         loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count, bucket=bucket.name,
             password=bucket.saslPassword, prefix=str(bucket.name), port=8091)
         loader["mcsoda"].cfg["exit-after-creates"] = 1
         loader["mcsoda"].cfg["json"] = 0
         loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_' + bucket.name)
         loader["thread"].daemon = True
         loaders.append(loader)
     for loader in loaders:
         loader["thread"].start()
     return loaders
Example #3
 def start_load_phase(self, master):
     loaders = []
     rest = RestConnection(master)
     for bucket in rest.get_buckets():
         loader = dict()
         loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count, bucket=bucket.name,
                             rest_password=master.rest_password, prefix=str(bucket.name), port=8091)
         loader["mcsoda"].cfg["exit-after-creates"] = 1
         loader["mcsoda"].cfg["json"] = 0
         loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_' + bucket.name)
         loader["thread"].daemon = True
         loaders.append(loader)
     for loader in loaders:
         loader["thread"].start()
     return loaders
Example #4
 def start_access_phase(self, master):
     loaders = []
     rest = RestConnection(master)
     for bucket in rest.get_buckets():
         loader = dict()
         loader["mcsoda"] = LoadWithMcsoda(
             master,
            self.keys_count // 2,
             bucket=bucket.name,
             password=bucket.saslPassword,
             prefix=str(bucket.name),
             port=8091,
         )
         loader["mcsoda"].cfg["ratio-sets"] = 0.8
         loader["mcsoda"].cfg["ratio-hot"] = 0.2
         loader["mcsoda"].cfg["ratio-creates"] = 0.5
         loader["mcsoda"].cfg["ratio-deletes"] = self.ratio_deletes
         loader["mcsoda"].cfg["ratio-expirations"] = self.ratio_expiry
         loader["mcsoda"].cfg["json"] = 0
         loader["thread"] = Thread(target=loader["mcsoda"].load_data, name="mcloader_" + bucket.name)
         loader["thread"].daemon = True
         loaders.append(loader)
     for loader in loaders:
         loader["thread"].start()
     return loaders
Example #5
 def start_access_phase(self, master):
     loaders = []
     rest = RestConnection(master)
     for bucket in rest.get_buckets():
         loader = {}
         loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count, bucket=bucket.name,\
                 password=bucket.saslPassword, prefix=str(bucket.name))
         loader["mcsoda"].cfg["exit-after-creates"] = 1
         loader["mcsoda"].cfg["ratio-sets"] = 0.8
         loader["mcsoda"].cfg["ratio-hot"] = 0.2
         loader["mcsoda"].cfg["ratio-creates"] = 0.5
         loader["mcsoda"].cfg["ratio-deletes"] = 0.13
         loader["mcsoda"].cfg["ratio-expirations"] = 0.03
         loader["mcsoda"].cfg["json"] = 0
         loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_'+bucket.name)
         loader["thread"].daemon = True
         loaders.append(loader)
     self.loaders = loaders
     for loader in loaders:
         loader["thread"].start()
     self.load_started = True
     return loaders
Example #6
    def test_verify_memcache_connections(self):
        allowed_memcached_conn = self._input.param("allowed_connections", 100)
        max_ops_per_second = self._input.param("max_ops_per_second", 2500)
        min_item_size = self._input.param("min_item_size", 128)
        num_docs = self._input.param("num_docs", 30000)
        # start load, max_ops_per_second is the combined limit for all buckets
        mcsodaLoad = LoadWithMcsoda(self.src_master, num_docs, prefix='')
        mcsodaLoad.cfg["max-ops"] = 0
        mcsodaLoad.cfg["max-ops-per-sec"] = max_ops_per_second
        mcsodaLoad.cfg["exit-after-creates"] = 1
        mcsodaLoad.cfg["min-value-size"] = min_item_size
        mcsodaLoad.cfg["json"] = 0
        mcsodaLoad.cfg["batch"] = 100
        loadDataThread = Thread(target=mcsodaLoad.load_data,
                                  name='mcloader_default')
        loadDataThread.daemon = True
        loadDataThread.start()

        src_remote_shell = RemoteMachineShellConnection(self.src_master)
        machine_type = src_remote_shell.extract_remote_info().type.lower()
        while loadDataThread.is_alive() and machine_type == 'linux':
            # Count TIME_WAIT sockets on the memcached port (11210).
            command = "netstat -lpnta | grep 11210 | grep TIME_WAIT | wc -l"
            output, _ = src_remote_shell.execute_command(command)
            if int(output[0]) > allowed_memcached_conn:
                # stop load
                mcsodaLoad.load_stop()
                loadDataThread.join()
                self.fail("Memcached connections {0} are increased above {1} \
                            on Source node".format(
                                                   allowed_memcached_conn,
                                                   int(output[0])))
            self.sleep(5)

        # stop load
        mcsodaLoad.load_stop()
        loadDataThread.join()
Example #7
    def _common_test_body(self):
        master = self.servers[0]
        rest = RestConnection(master)

        # start load, max_ops_per_second is the combined limit for all buckets
        buckets = rest.get_buckets()
        loaders = []
        self.log.info("max-ops-per-second per bucket: {0}".format(self.max_ops_per_second / len(buckets)))
        for bucket in buckets:
            loader = {}
            loader["mcsoda"] = LoadWithMcsoda(master, self.keys_count, prefix='', bucket=bucket.name,
                password=bucket.saslPassword, protocol='membase-binary')
            loader["mcsoda"].cfg["max-ops"] = 0
            loader["mcsoda"].cfg["max-ops-per-sec"] = self.max_ops_per_second / len(buckets)
            loader["mcsoda"].cfg["exit-after-creates"] = 0
            loader["mcsoda"].cfg["min-value-size"] = self.min_item_size
            loader["mcsoda"].cfg["json"] = 0
            loader["mcsoda"].cfg["batch"] = 100
            loader["thread"] = Thread(target=loader["mcsoda"].load_data, name='mcloader_' + bucket.name)
            loader["thread"].daemon = True
            loaders.append(loader)

        for loader in loaders:
            loader["thread"].start()

        for iteration in range(self.repeat):
            for server in self.servers[1:]:
                self.log.info("iteration {0}: ".format(iteration))
                self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
                self.log.info("adding node {0} and rebalance afterwards".format(server.ip))

                rebalance_done = False
                rebalance_try = 0
                while not rebalance_done:
                    try:
                        ClusterOperationHelper.begin_rebalance_in(master, [server])
                        ClusterOperationHelper.end_rebalance(master)
                        rebalance_done = True
                    except AssertionError as e:
                        rebalance_try += 1
                        self.log.error(e)
                        time.sleep(5)
                        if rebalance_try > 5:
                            raise e

            for server in self.servers[1:]:
                self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
                self.log.info("removing node {0} and rebalance afterwards".format(server.ip))

                rebalance_done = False
                rebalance_try = 0
                while not rebalance_done:
                    try:
                        ClusterOperationHelper.begin_rebalance_out(master, [server])
                        ClusterOperationHelper.end_rebalance(master)
                        rebalance_done = True
                    except AssertionError as e:
                        rebalance_try += 1
                        self.log.error(e)
                        time.sleep(5)
                        if rebalance_try > 5:
                            raise e

        # stop load
        for loader in loaders:
            loader["mcsoda"].load_stop()

        for loader in loaders:
            loader["thread"].join()