Example #1
0
File: moxi.py  Project: Boggypop/testrunner
 def rebalance(self):
     """Continuously churn the cluster until told to stop.

     Each pass rebalances every known server in, then (unless the stop
     flag was set meanwhile) rebalances the last server back out.
     """
     while not self.finished:
         ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
         ClusterOperationHelper.end_rebalance(self.master)
         if self.finished:
             continue  # stop requested during the rebalance-in; skip the out phase
         ClusterOperationHelper.begin_rebalance_out(self.master, self.servers[-1:])
         ClusterOperationHelper.end_rebalance(self.master)
Example #2
0
 def rebalance(self):
     while not self.finished:
         ClusterOperationHelper.begin_rebalance_in(self.master,
                                                   self.servers)
         ClusterOperationHelper.end_rebalance(self.master)
         if not self.finished:
             ClusterOperationHelper.begin_rebalance_out(
                 self.master, self.servers[-1:])
             ClusterOperationHelper.end_rebalance(self.master)
Example #3
0
    def _common_test_body(self):
        """Run a continuous mcsoda load across all buckets while repeatedly
        rebalancing every non-master server in and then back out.

        The per-bucket op rate is ``self.max_ops_per_second`` split evenly
        across the buckets. Loader threads are daemonized, started before the
        rebalance churn, and stopped/joined once ``self.repeat`` iterations of
        the in/out cycle complete.
        """
        master = self.servers[0]
        rest = RestConnection(master)

        def rebalance_with_retry(begin_op, nodes, max_attempts=6):
            # One rebalance (begin + end), retrying transient failures.
            # Helper failures surface as AssertionError; after max_attempts
            # consecutive failures the last one is re-raised unchanged.
            attempt = 0
            while True:
                try:
                    begin_op(master, nodes)
                    ClusterOperationHelper.end_rebalance(master)
                    return
                except AssertionError as e:
                    attempt += 1
                    self.log.error(e)
                    time.sleep(5)
                    if attempt >= max_attempts:
                        raise  # bare raise keeps the original traceback

        # Start load; max_ops_per_second is the combined limit for all buckets,
        # so divide it evenly per bucket (hoisted out of the loop).
        buckets = rest.get_buckets()
        ops_per_bucket = self.max_ops_per_second / len(buckets)
        self.log.info("max-ops-per-second per bucket: {0}".format(ops_per_bucket))
        loaders = []
        for bucket in buckets:
            mcsoda = LoadWithMcsoda(master, self.keys_count, prefix='', bucket=bucket.name,
                password=bucket.saslPassword, protocol='membase-binary')
            mcsoda.cfg["max-ops"] = 0              # unlimited total ops; run until stopped
            mcsoda.cfg["max-ops-per-sec"] = ops_per_bucket
            mcsoda.cfg["exit-after-creates"] = 0   # keep mutating after initial creates
            mcsoda.cfg["min-value-size"] = self.min_item_size
            mcsoda.cfg["json"] = 0
            mcsoda.cfg["batch"] = 100
            thread = Thread(target=mcsoda.load_data, name='mcloader_' + bucket.name)
            thread.daemon = True
            loaders.append({"mcsoda": mcsoda, "thread": thread})

        for loader in loaders:
            loader["thread"].start()

        for iteration in range(self.repeat):
            # Rebalance each non-master node in, one at a time.
            for server in self.servers[1:]:
                self.log.info("iteration {0}: ".format(iteration))
                self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
                self.log.info("adding node {0} and rebalance afterwards".format(server.ip))
                rebalance_with_retry(ClusterOperationHelper.begin_rebalance_in, [server])

            # Then rebalance each of them back out, one at a time.
            for server in self.servers[1:]:
                self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
                self.log.info("removing node {0} and rebalance afterwards".format(server.ip))
                rebalance_with_retry(ClusterOperationHelper.begin_rebalance_out, [server])

        # Stop the load and wait for every loader thread to finish.
        for loader in loaders:
            loader["mcsoda"].load_stop()

        for loader in loaders:
            loader["thread"].join()
Example #4
0
    def test_getr(self):
        """Exercise replica reads (getr) under a matrix of conditions.

        Loads items (unless skipped), optionally deletes/mutates/ejects them,
        optionally restarts memcached (warmup) or rebalances, then reads the
        items back from replicas and asserts the expected count. Parameter
        combinations that invalidate the data (delay past expiration, delete
        without re-mutate, skipload without mutate) are "negative" runs that
        must find zero items.
        """
        item_count = self.input.param("item_count", 10000)
        replica_count = self.input.param("replica_count", 1)
        expiration = self.input.param("expiration", 0)
        delay = float(self.input.param("delay", 0))
        eject = self.input.param("eject", 0)
        delete = self.input.param("delete", 0)
        mutate = self.input.param("mutate", 0)
        warmup = self.input.param("warmup", 0)
        skipload = self.input.param("skipload", 0)
        rebalance = self.input.param("rebalance", 0)

        # Decide up-front whether this parameter combination should yield
        # no readable replica items at the end.
        negative_test = False
        if delay > expiration:
            # Sleeping longer than the TTL means items expire before getr.
            negative_test = True
        if delete and not mutate:
            negative_test = True
        if skipload and not mutate:
            negative_test = True

        # Random key prefix keeps this run's keys distinct between runs.
        prefix = str(uuid.uuid4())[:7]

        # Rebuild the default bucket from scratch with the requested replica
        # count (bucket_ram=-1 presumably means "use the default quota" —
        # TODO confirm against BucketOperationHelper).
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
        BucketOperationHelper.create_bucket(self.master, name=self.default_bucket_name, replica=replica_count, port=11210, test_case=self, bucket_ram=-1, password="")

        if rebalance == GetrTests.DURING_REBALANCE or rebalance == GetrTests.AFTER_REBALANCE:
            # leave 1 node unclustered for rebalance in
            ClusterOperationHelper.begin_rebalance_out(self.master, self.servers[-1:])
            ClusterOperationHelper.end_rebalance(self.master)
            ClusterOperationHelper.begin_rebalance_in(self.master, self.servers[:-1])
            ClusterOperationHelper.end_rebalance(self.master)
        else:
            # No mid-test rebalance planned: cluster all servers now.
            ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
            ClusterOperationHelper.end_rebalance(self.master)

        # vprefix tags the item values so mutated values are distinguishable.
        vprefix = ""
        if not skipload:
            self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)
            if not expiration:
                # Wait until active+replica item totals settle (skipped for
                # expiring items, which may vanish before the stat converges).
                RebalanceHelper.wait_for_stats_int_value(self.master, self.default_bucket_name, "curr_items_tot", item_count * (replica_count + 1), "<=", 600, True)

        if delete:
            self._delete_items(item_count=item_count, prefix=prefix)

        if mutate:
            # Overwrite every item with a tagged value; getr results are
            # checked against this vprefix below.
            vprefix = "mutated"
            self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)

        # Replicas must be caught up before any replica reads make sense.
        self.assertTrue(RebalanceHelper.wait_for_replication(self.rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        if eject:
            self._eject_items(item_count=item_count, prefix=prefix)

        if delay:
            self.sleep(delay)

        if rebalance == GetrTests.DURING_REBALANCE:
            # Start (but do not finish) the rebalance so getr runs mid-move;
            # it is completed at the very end of the test.
            ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
        if rebalance == GetrTests.AFTER_REBALANCE:
            ClusterOperationHelper.end_rebalance(self.master)
        if warmup:
            self.log.info("restarting memcached")
            # Erlang snippet run server-side via diag/eval: try the testrunner
            # API restart first, fall back to restarting the memcached port.
            command = "rpc:multicall(erlang, apply, [fun () -> try ns_server_testrunner_api:restart_memcached(20000) catch _:_ -> ns_port_sup:restart_port_by_name(memcached) end end, []], 20000)."
            memcached_restarted, content = self.rest.diag_eval(command)
            #wait until memcached starts
            self.assertTrue(memcached_restarted, "unable to restart memcached process through diag/eval")
            RebalanceHelper.wait_for_stats(self.master, self.default_bucket_name, "curr_items_tot", item_count * (replica_count + 1), 600)

        # Read every item back from each replica and count successes.
        count = self._getr_items(item_count=item_count, replica_count=replica_count, prefix=prefix, vprefix=vprefix)

        if negative_test:
            self.assertTrue(count == 0, "found {0} items, expected none".format(count))
        else:
            self.assertTrue(count == replica_count * item_count, "expected {0} items, got {1} items".format(replica_count * item_count, count))
        if rebalance == GetrTests.DURING_REBALANCE:
            # Finish the rebalance started before the getr pass.
            ClusterOperationHelper.end_rebalance(self.master)