Example #1
    def iostats(self, nodes, frequency, verbosity=False):

        shells = []
        for node in nodes:
            try:
                bucket = RestConnection(node).get_buckets()[0].name
                MemcachedClientHelper.direct_client(node, bucket)
                shells.append(RemoteMachineShellConnection(node))
            except Exception:
                pass

        self._task["iostats"] = []

        print "started capturing io stats"

        while not self._aborted():
            time.sleep(frequency)
            print "collecting io_stats"
            for shell in shells:
                kB_read, kB_wrtn = self._extract_io_info(shell)
                if kB_read and kB_wrtn:
                self._task["iostats"].append({"time": time.time(),
                                              "ip": shell.ip,
                                              "read": kB_read,
                                              "write": kB_wrtn})
        print "finished capturing io stats"
Example #2
    def common_tearDown(servers, testcase):
        log = logger.Logger.get_logger()
        log.info(
            "==============  common_tearDown was started for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
        RemoteUtilHelper.common_basic_setup(servers)

        log.info("10 seconds delay to wait for couchbase-server to start")
        time.sleep(10)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
        )
        try:
            rest = RestConnection(servers[0])
            buckets = rest.get_buckets()
            for bucket in buckets:
                MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
        except Exception:
            pass
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        log.info(
            "==============  common_tearDown was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
Example #3
    def system_stats(self, nodes, pnames, frequency, verbosity=False):
        shells = []
        for node in nodes:
            try:
                bucket = RestConnection(node).get_buckets()[0].name
                MemcachedClientHelper.direct_client(node, bucket)
                shells.append(RemoteMachineShellConnection(node))
            except Exception:
                pass
        d = {"snapshots": []}
        # each snapshot: {"name": pname, "id": pid, "unique_id": ip-start_time, "time": t, "ip": ip, ...proc info}

        start_time = str(self._task["time"])
        while not self._aborted():
            time.sleep(frequency)
            current_time = time.time()
            for i, shell in enumerate(shells):
                node = nodes[i]
                unique_id = node.ip+'-'+start_time
                for pname in pnames:
                    obj = RemoteMachineHelper(shell).is_process_running(pname)
                    if obj and obj.pid:
                        value = self._extract_proc_info(shell, obj.pid)
                        value["name"] = pname
                        value["id"] = obj.pid
                        value["unique_id"] = unique_id
                        value["time"] = current_time
                        value["ip"] = node.ip
                        d["snapshots"].append(value)
        self._task["systemstats"] = d["snapshots"]
        print " finished system_stats"
 def insert_key(serverInfo, bucket_name, count, size):
     client = MemcachedClientHelper.proxy_client(serverInfo, bucket_name)
     value = MemcachedClientHelper.create_value("*", size)
     for i in range(count * 1000):
         key = "key_" + str(i)
         flag = random.randint(1, 999)
         client.set(key, 0, flag, value)
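Note that insert_key loads count * 1000 items, not count items. A short usage sketch (the server handle and bucket name are placeholders):

    # pushes 2000 keys of ~512 bytes each into the "default" bucket
    insert_key(serverInfo, "default", count=2, size=512)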
Example #5
    def set_get_test(self, value_size, number_of_items):
        fixed_value = MemcachedClientHelper.create_value("S", value_size)
        specs = [
            ("default", 0),
            ("set-get-bucket-replica-1", 1),
            ("set-get-bucket-replica-2", 2),
            ("set-get-bucket-replica-3", 3),
        ]
        serverInfo = self.master
        rest = RestConnection(serverInfo)
        bucket_ram = int(rest.get_nodes_self().memoryQuota / 4)

        mcport = rest.get_nodes_self().memcached
        for name, replica in specs:
            rest.create_bucket(name, bucket_ram, "sasl", "password", replica, mcport)

        bucket_data = {}
        buckets = RestConnection(serverInfo).get_buckets()
        for bucket in buckets:
            bucket_data[bucket.name] = {}
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            self.test.assertTrue(ready, "wait_for_memcached failed")

            client = MemcachedClientHelper.direct_client(serverInfo, bucket.name)
            inserted = []
            rejected = []
            while len(inserted) <= number_of_items and len(rejected) <= number_of_items:
                try:
                    key = str(uuid.uuid4())
                    client.set(key, 0, 0, fixed_value)
                    inserted.append(key)
                except mc_bin_client.MemcachedError:
                    rejected.append(key)

            retry = 0
            remaining_items = []
            remaining_items.extend(inserted)
            msg = "memcachedError : {0} - unable to get a pre-inserted key : {1}"
            while retry < 10 and len(remaining_items) > 0:
                verified_keys = []
                for key in remaining_items:
                    try:
                        flag, keyx, value = client.get(key=key)
                        if not value == fixed_value:
                            self.test.fail("value mismatch for key {0}".format(key))
                        verified_keys.append(key)
                    except mc_bin_client.MemcachedError as error:
                        self.log.error(msg.format(error.status, key))
                retry += 1
                for x in verified_keys:
                    remaining_items.remove(x)

            print_count = 0
            for key in remaining_items:
                if print_count > 100:
                    break
                print_count += 1
                self.log.error("unable to verify key : {0}".format(key))
            if remaining_items:
                self.test.fail("unable to verify {0} keys".format(len(remaining_items)))
Example #6
    def test_checkpointing_with_full_rollback(self):
        bucket = self.src_cluster.get_buckets()[0]
        nodes = self.src_cluster.get_nodes()

        # Stop Persistence on Node A & Node B
        for node in nodes:
            mem_client = MemcachedClientHelper.direct_client(node, bucket)
            mem_client.stop_persistence()

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()

        self.sleep(self._checkpoint_interval * 2)

        self.get_and_validate_latest_checkpoint()

        # Perform mutations on the bucket
        self.async_perform_update_delete()

        self.sleep(self._wait_timeout)

        # Kill memcached on Node A so that Node B becomes master
        shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
        shell.kill_memcached()

        # Start persistence on Node B
        mem_client = MemcachedClientHelper.direct_client(nodes[1], bucket)
        mem_client.start_persistence()

        # Failover Node B
        failover_task = self.src_cluster.async_failover()
        failover_task.result()

        # Wait for Failover & rollback to complete
        self.sleep(self._wait_timeout * 5)

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'
        count1 = NodeHelper.check_goxdcr_log(
            nodes[0],
            "Received rollback from DCP stream",
            goxdcr_log)
        self.assertGreater(count1, 0, "full rollback not received from DCP as expected")
        self.log.info("full rollback received from DCP as expected")
        count2 = NodeHelper.check_goxdcr_log(
            nodes[0],
            "Rolled back startSeqno to 0",
            goxdcr_log)
        self.assertGreater(count2, 0, "startSeqno not rolled back to 0 as expected")
        self.log.info("startSeqno rolled back to 0 as expected")

        shell.disconnect()
Example #7
 def common_tearDown(servers, testcase):
     RemoteUtilHelper.common_basic_setup(servers)
     log = logger.Logger.get_logger()
     log.info("10 seconds delay to wait for couchbase-server to start")
     time.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     try:
         MemcachedClientHelper.flush_bucket(servers[0], 'default')
     except Exception:
         pass
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
Example #8
    def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
        bucket = "default"
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        expiry = 60
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, key)
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        client.close()
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        node = RestConnection(self.master).get_nodes_self()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)
        backupHelper = BackupHelper(self.master, self)
        backupHelper.backup(bucket, node, self.remote_tmp_folder)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        backupHelper.restore(self.remote_tmp_folder)
        time.sleep(60)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        self.log.info('verifying that all those keys have expired...')
        for key in keys:
            try:
                client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.fail(msg.format(expiry, key, expiry))
            except mc_bin_client.MemcachedError as error:
                self.assertEquals(error.status, 1,
                                  msg="expected error code {0} but saw error code {1}".format(1, error.status))
        client.close()
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Example #9
 def common_tearDown(servers, testcase):
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         shell.start_membase()
     log = logger.Logger.get_logger()
     log.info("10 seconds delay to wait for membase-server to start")
     time.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     try:
         MemcachedClientHelper.flush_bucket(servers[0], 'default', 11211)
     except Exception:
         pass
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
Example #10
 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.params = TestInputSingleton.input.test_params
     self.master = TestInputSingleton.input.servers[0]
     rest = RestConnection(self.master)
     rest.init_cluster(self.master.rest_username, self.master.rest_password)
     info = rest.get_nodes_self()
     rest.init_cluster_memoryQuota(self.master.rest_username, self.master.rest_password,
                                   memoryQuota=info.mcdMemoryReserved)
     ClusterOperationHelper.cleanup_cluster([self.master])
     ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master], self)
     self._create_default_bucket()
     self.keys_cleanup = []
     self.onenodemc = MemcachedClientHelper.direct_client(self.master, "default", timeout=600)
     self.onenodemoxi = MemcachedClientHelper.proxy_client(self.master, "default", timeout=600)
Example #11
    def run_test(self):
        ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
        active_resident_threshold = int(self.input.param("active_resident_threshold", 10))

        mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
        stats = mc.stats()
        threshold = int(self.input.param("threshold", stats[ep_threshold]))
        threshold_reached = False
        self.num_items = self.input.param("items", 10000)
        self._load_doc_data_all_buckets("create")

        # load items until mem_used crosses the threshold or the active resident ratio drops below the target
        while not threshold_reached:
            mem_used = int(mc.stats()["mem_used"])
            if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
                self.log.info(
                    "mem_used at %s/%s, vb_active_perc_mem_resident at %s"
                    % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"])
                )
                items = self.num_items
                self.num_items += self.input.param("items", 10000)
                self._load_doc_data_all_buckets("create", items)
            else:
                threshold_reached = True
                self.log.info("DGM state achieved!!!!")

        # wait for draining of data before restart and warm up
        for bucket in self.buckets:
            RebalanceHelper.wait_for_persistence(self.nodes_server[0], bucket)

        while True:

            read_data_task = Thread(target=self._run_get)
            read_data_task.start()
            # 5 threads to run stats all and reset asynchronously
            start = time.time()
            while (time.time() - start) < 300:

                stats_all_thread = []
                stats_reset_thread = []

                for i in xrange(self.threads_to_run):
                    stat_str = ""
                    stats_all_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                    stats_all_thread[i].start()
                    stat_str = "reset"
                    stats_reset_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                    stats_reset_thread[i].start()

                for i in xrange(self.threads_to_run):
                    stats_all_thread[i].join()
                    stats_reset_thread[i].join()

                del stats_all_thread
                del stats_reset_thread

            read_data_task.join()
Example #12
 def run(self):
     client = MemcachedClientHelper.direct_client(server, bucket)
     for i in range(num_items):
         key = "key-{0}".format(i)
         value = "value-{0}".format(str(uuid.uuid4())[:7])
         client.set(key, 0, 0, value, 0)
     log.info("Loaded {0} key".format(num_items))
Example #13
 def _insert_data(self, howmany):
     self.onenodemc = MemcachedClientHelper.proxy_client(self.master, "default")
     items = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, howmany)]
     for item in items:
         self.onenodemc.set(item, 0, 0, item)
     self.log.info("inserted {0} items".format(howmany))
     self.onenodemc.close()
Example #14
 def test_MB_14288(self):
     mc = MemcachedClientHelper.proxy_client(self.master, "default")
     blob = bytearray(1024 * 1024 * 10)
     mc.set("MB-14288", 0, 0, blob)
     flags_v, cas_v, retrieved = mc.get("MB-14288")
     if not blob == retrieved:
         self.fail("It should be possible to store and retrieve values > 1M")
Example #15
    def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):

        if master:
            self.rest = RestConnection(master)
        if bucket:
            self.client = VBucketAwareMemcached(self.rest, bucket)

        k = 0
        payload = MemcachedClientHelper.create_value('*', self.value_size)

        while k < self.items:
            key = "{0}{1}".format(self.prefix, k)
            k += 1
            for i in range(mutations):
                if ops == 'set':
                    self.client.memcached(key).set(key, 0, 0, payload)
                elif ops == 'add':
                    self.client.memcached(key).add(key, 0, 0, payload)
                elif ops == 'replace':
                    self.client.memcached(key).replace(key, 0, 0, payload)
                elif ops == 'delete':
                    self.client.memcached(key).delete(key)
                elif ops == 'expiry':
                    self.client.memcached(key).set(key, self.expire_time, 0, payload)
                elif ops == 'touch':
                    self.client.memcached(key).touch(key, 10)

        self.log.info("Done with specified {0} ops".format(ops))
Example #16
    def collect_dcp_stats(self, buckets, servers, stat_names=None, extra_key_condition="replication"):
        """
            Method to extract DCP stats given by the cbstats tool

            Parameters:

            buckets: bucket information
            servers: server information
            stat_names: stats we are searching to compare

            Returns:

            map of buckets: map[bucket][vbucket id][stat name]

            example:: unacked_bytes in dcp
        """
        stat_names = stat_names or []
        bucketMap = {}
        for bucket in buckets:
            dataMap = {}
            for server in servers:
                stats = MemcachedClientHelper.direct_client(server, bucket).stats('dcp')
                for key in stats.keys():
                    for stat_name in stat_names:
                        if stat_name in key and extra_key_condition in key:
                            value = int(stats[key])
                            tokens = key.split(":")
                            vb_no = int(tokens[-1].split("_")[1])
                            if vb_no not in dataMap:
                                dataMap[vb_no] = {}
                            dataMap[vb_no][stat_name] = value
            bucketMap[bucket.name] = dataMap
        return bucketMap
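A brief sketch of consuming the returned map, e.g. to check that replication streams have drained (the assertion policy is illustrative, not part of the helper):

    stats_map = self.collect_dcp_stats(buckets, servers, stat_names=["unacked_bytes"])
    for bucket_name, vbuckets in stats_map.items():
        for vb_no, vb_stats in vbuckets.items():
            assert vb_stats.get("unacked_bytes", 0) == 0, \
                "vb %s of %s still has unacked bytes" % (vb_no, bucket_name)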
Example #17
    def incr_test(self, key, exp, flags, value, incr_amt, decr_amt, incr_time):
        global update_value

        serverInfo = self.master
        client = MemcachedClientHelper.proxy_client(serverInfo, self.bucket_name)
        if key != 'no_key':
            client.set(key, exp, flags, value)
        if exp:
            self.log.info('Wait {0} seconds for the key to expire'.format(exp + 2))
            time.sleep(exp + 2)
        if decr_amt:
            c, d = client.decr(key, decr_amt)
            self.log.info('decr amt {0}' .format(c))
        try:
            i = 0
            while i < incr_time:
                update_value, cas = client.incr(key, incr_amt)
                i += 1
            self.log.info('incr {0} times with value {1}'.format(incr_time, incr_amt))
            return update_value
        except mc_bin_client.MemcachedError as error:
            self.log.info('memcachedError : {0}'.format(error.status))
            self.test.fail("unable to increment value: {0}".format(incr_amt))
Example #18
 def test_append_wrong_cas(self):
     # Monitor memory usage: appending 50,000 times (5 KB each) with a wrong
     # CAS should not grow mem_used by more than 10 percent.
     stats = self.onenodemc.stats()
     initial_mem_used = -1
     if "mem_used" in stats:
         initial_mem_used = int(stats["mem_used"])
         self.assertTrue(initial_mem_used > 0)
     key = str(uuid.uuid4())
     size = 5 * 1024
     value = MemcachedClientHelper.create_value("*", size)
     self.onenodemc.set(key, 0, 0, value)
     flags_v, cas_v, get_v = self.onenodemc.get(key)
     self.onenodemc.append(key, value, cas_v)
     iteration = 50000
     for i in range(0, iteration):
         try:
             self.onenodemc.append(key, value, random.randint(0, 1000))
         except Exception:
             # wrong-CAS appends are expected to fail; ignore the error
             pass
     stats = self.onenodemc.stats()
     if "mem_used" in stats:
         delta = int(stats["mem_used"]) - initial_mem_used
         self.log.info("initial mem_used {0}, current mem_used {1}".format(initial_mem_used, stats["mem_used"]))
         self.log.info(delta)
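The comment above states a 10 percent bound on mem_used growth, but the test only logs the delta; a sketch of actually enforcing the bound (same stats shape as above, the threshold policy is an assumption) would be:

    if initial_mem_used > 0 and "mem_used" in stats:
        # wrong-CAS appends should all be rejected, so memory growth
        # should stay within 10 percent of the starting footprint
        delta = int(stats["mem_used"]) - initial_mem_used
        self.assertTrue(delta < 0.1 * initial_mem_used,
                        "mem_used grew by {0} bytes".format(delta))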
Example #19
 def _kill_nodes(self, nodes, servers, bucket_name):
     self.reboot = self.input.param("reboot", True)
     if not self.reboot:
         for node in nodes:
             _node = {
                 "ip": node.ip,
                 "port": node.port,
                 "username": self.servers[0].rest_username,
                 "password": self.servers[0].rest_password,
             }
             node_rest = RestConnection(_node)
             _mc = MemcachedClientHelper.direct_client(_node, bucket_name)
             self.log.info("restarted the node %s:%s" % (node.ip, node.port))
             pid = _mc.stats()["pid"]
             command = 'os:cmd("kill -9 {0} ")'.format(pid)
             self.log.info(command)
             killed = node_rest.diag_eval(command)
             self.log.info("killed ??  {0} ".format(killed))
             _mc.close()
     else:
         for server in servers:
             shell = RemoteMachineShellConnection(server)
             command = "reboot"
             output, error = shell.execute_command(command)
             shell.log_command_output(output, error)
             shell.disconnect()
             time.sleep(self.wait_timeout * 8)
             shell = RemoteMachineShellConnection(server)
             command = "/sbin/iptables -F"
             output, error = shell.execute_command(command)
             shell.log_command_output(output, error)
             shell.disconnect()
Example #20
    def test_time_sync_threshold_setting_rest_call(self):

        self.log.info("starting test_time_sync_threshold_setting_rest_call")

        # bucket is created with lww in base test case using the LWW parameter

        client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])

        rest = RestConnection(self.master)
        self.assertTrue(
            rest.set_cas_drift_threshold(self.buckets[0], 100000, 200000), "Unable to set the CAS drift threshold"
        )
        time.sleep(15)  # take a few seconds for the stats to settle in
        stats = client.stats()

        self.assertTrue(
            int(stats["ep_hlc_drift_ahead_threshold_us"]) == 100000 * 1000,
            "Ahead threshold incorrect. Expected {0} actual {1}".format(
                100000 * 1000, stats["ep_hlc_drift_ahead_threshold_us"]
            ),
        )

        self.assertTrue(
            int(stats["ep_hlc_drift_behind_threshold_us"]) == 200000 * 1000,
            "Ahead threshold incorrect. Expected {0} actual {1}".format(
                200000 * 1000, stats["ep_hlc_drift_behind_threshold_us"]
            ),
        )
Example #21
    def getr_dgm_test(self):
        resident_ratio = self.input.param("resident_ratio", 50)
        gens = []
        delta_items = 200000
        self.num_items = 0
        mc = MemcachedClientHelper.direct_client(self.master, self.default_bucket_name)

        self.log.info("LOAD PHASE")
        end_time = time.time() + self.wait_timeout * 30
        while (int(mc.stats()["vb_active_perc_mem_resident"]) == 0 or\
               int(mc.stats()["vb_active_perc_mem_resident"]) > resident_ratio) and\
              time.time() < end_time:
            self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
            gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                                    start=self.num_items, end=(self.num_items + delta_items))
            gens.append(copy.deepcopy(gen))
            self._load_all_buckets(self.master, gen, 'create', self.expiration, kv_store=1,
                                   flag=self.flags, only_store_hash=False, batch_size=1)
            self.num_items += delta_items
            self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
        self.assertTrue(int(mc.stats()["vb_active_perc_mem_resident"]) < resident_ratio,
                        "Resident ratio is not reached")
        self.verify_cluster_stats(self.servers[:self.nodes_init], only_store_hash=False,
                                  batch_size=1)
        self.log.info("Currently loaded items: %s" % self.num_items)

        self.log.info("READ REPLICA PHASE")
        self.verify_cluster_stats(self.servers[:self.nodes_init], only_store_hash=False,
                                  replica_to_read=self.replica_to_read, batch_size=1)
Example #22
    def collect_vbucket_num_stats(self, servers, buckets):
        """
            Method to extract active and replica vbucket counts given by the cbstats tool

            Parameters:

            buckets: bucket information
            servers: server information

            Returns:

            Two maps, one for active and one for replica vbucket counts:
            {bucket: {node ip: count}}
        """
        active_bucketMap = {}
        replica_bucketMap = {}
        for bucket in buckets:
            active_map_data = {}
            replica_map_data = {}
            for server in servers:
                client = MemcachedClientHelper.direct_client(server, bucket)
                stats = client.stats('')
                if 'vb_active_num' in stats:
                    active_map_data[server.ip] = int(stats['vb_active_num'])
                if 'vb_replica_num' in stats:
                    replica_map_data[server.ip] = int(stats['vb_replica_num'])
            active_bucketMap[bucket.name] = active_map_data
            replica_bucketMap[bucket.name] = replica_map_data
        return active_bucketMap, replica_bucketMap
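A short sketch of consuming the two maps, e.g. to confirm that a one-replica bucket holds as many replica vbuckets as active ones cluster-wide (the check itself is illustrative):

    active_map, replica_map = self.collect_vbucket_num_stats(servers, buckets)
    for bucket in buckets:
        total_active = sum(active_map[bucket.name].values())
        total_replica = sum(replica_map[bucket.name].values())
        # with replicaNumber=1 the two totals should match
        assert total_active == total_replica, \
            "%s: %s active vs %s replica vbuckets" % (bucket.name, total_active, total_replica)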
Example #23
    def _common_test_body(self, moxi=False):
        master = self.servers[0]
        rest = RestConnection(master)
        creds = self.input.membase_settings
        bucket_data = RebalanceBaseTest.bucket_data_init(rest)

        for server in self.servers[1:]:
            self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
            self.log.info("adding node {0}:{1} and rebalance afterwards".format(server.ip, server.port))
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
            msg = "unable to add node {0} to the cluster {1}"
            self.assertTrue(otpNode, msg.format(server.ip, master.ip))
            for name in bucket_data:
                inserted_keys, rejected_keys = \
                MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.servers[0]],
                    name=name,
                    ram_load_ratio=-1,
                    number_of_items=self.keys_count,
                    number_of_threads=1,
                    write_only=True)
                rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
                self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(server.ip))
                self.log.info("completed rebalancing in server {0}".format(server))
                IncrementalRebalanceWithParallelReadTests._reader_thread(self, inserted_keys, bucket_data, moxi=moxi)
                self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(server.ip))
                break
Example #24
 def test_expired_keys(self):
     serverInfo = self.master
     client = MemcachedClientHelper.direct_client(serverInfo, self._bucket_name)
     expirations = [2, 5, 10]
     for expiry in expirations:
         testuuid = uuid.uuid4()
         keys = ["key_%s_%d" % (testuuid, i) for i in range(500)]
         self.log.info("pushing keys with expiry set to {0}".format(expiry))
         for key in keys:
             try:
                 client.set(key, expiry, 0, key)
             except mc_bin_client.MemcachedError as error:
                 msg = "unable to push key : {0} to bucket : {1} error : {2}"
                 self.log.error(msg.format(key, client.vbucketId, error.status))
                 self.fail(msg.format(key, client.vbucketId, error.status))
         self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
         delay = expiry + 5
         msg = "sleeping for {0} seconds to wait for those items with expiry set to {1} to expire"
         self.log.info(msg.format(delay, expiry))
         time.sleep(delay)
         self.log.info('verifying that all those keys have expired...')
         for key in keys:
             try:
                 client.get(key=key)
                 msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                 self.fail(msg.format(expiry, key, delay))
             except mc_bin_client.MemcachedError as error:
                 self.assertEquals(error.status, 1,
                                   msg="expected error code {0} but saw error code {1}".format(1, error.status))
         self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Example #25
 def _stats_befor_warmup(self, bucket_name):
     self.pre_warmup_stats[bucket_name] = {}
     self.stats_monitor = self.input.param("stats_monitor", "")
     self.warmup_stats_monitor = self.input.param("warmup_stats_monitor", "")
     if self.stats_monitor != "":
         self.stats_monitor = self.stats_monitor.split(";")
     if self.warmup_stats_monitor != "":
         self.warmup_stats_monitor = self.warmup_stats_monitor.split(";")
     for server in self.servers:
         mc_conn = MemcachedClientHelper.direct_client(server, bucket_name, self.timeout)
         node_key = "%s:%s" % (server.ip, server.port)
         stats = mc_conn.stats("")
         self.pre_warmup_stats[bucket_name][node_key] = {}
         self.pre_warmup_stats[bucket_name][node_key]["uptime"] = stats["uptime"]
         self.pre_warmup_stats[bucket_name][node_key]["curr_items_tot"] = stats["curr_items_tot"]
         self.pre_warmup_stats[bucket_name][node_key]["curr_items"] = stats["curr_items"]
         for stat_to_monitor in self.stats_monitor:
             self.pre_warmup_stats[bucket_name][node_key][stat_to_monitor] = stats[stat_to_monitor]
         if self.without_access_log:
             warmup_stats = mc_conn.stats("warmup")
             for stat_to_monitor in self.warmup_stats_monitor:
                 self.pre_warmup_stats[bucket_name][node_key][stat_to_monitor] = warmup_stats[stat_to_monitor]
         mc_conn.close()
Example #26
 def _verify_direct_client_stats(self, bucket, command, output):
     mc_conn = MemcachedClientHelper.direct_client(self.master,
                                                   bucket.name, self.timeout)
     for line in output:
         stats = line.rsplit(":", 1)
         collect_stats = ""
         commands = ["hash", "tapagg"]
         if command in commands:
             output, error = self.shell.execute_cbstats(bucket, command)
             d = {}
             if len(output) > 0:
                 d = dict(s.strip().rsplit(':', 1) for s in output)
                 collect_stats = d[stats[0].strip()].strip()
             else:
                 raise Exception("Command does not throw out an error message, "
                                 "but cbstats gives no output. "
                                 "Please check the output manually")
         else:
             collect_stats = mc_conn.stats(command)[stats[0].strip()]
         self.log.info("CbStats###### for {0}:::{1}=={2}" \
                       .format(stats[0].strip(), collect_stats, stats[1].strip()))
         if stats[1].strip() == collect_stats:
             continue
         else:
             if stats[0].find('tcmalloc') != -1 or stats[0].find('bytes') != -1 or\
             stats[0].find('mem_used') != -1:
                 self.log.warn("Stat didn't match, but it can be changed, not a bug")
                 continue
             raise Exception("Command does not throw out an error message, "
                             "but cbstats does not match.")
Example #27
    def _verify_data(self, version):
        #verify all the keys
        #let's use vbucketaware
        rest = RestConnection(self.servers[0])
        moxi = MemcachedClientHelper.proxy_client(self.servers[0], self.bucket_name)
        index = 0
        all_verified = True
        keys_failed = []
        for key in self.updated_keys:
            try:
                index += 1
                flag, keyx, value = moxi.get(key=key)
                self.assertTrue(value.endswith(version),
                                msg='values do not match; key value should end with {0}'.format(version))
            except MemcachedError as error:
                self.log.error(error)
                self.log.error(
                    "memcachedError : {0} - unable to get a pre-inserted key : {1}".format(error.status, key))
                keys_failed.append(key)
                all_verified = False

        self.assertTrue(all_verified,
                        'unable to verify #{0} keys'.format(len(keys_failed)))
Example #28
    def block_for_replication(self, key, cas=0, num=1, timeout=0, persist=False):
        """
        observe a key until it has been replicated to @param num of servers

        @param persist : block until item has been persisted to disk
        """
        vbucketid = self.client._get_vBucket_id(key)
        repl_servers = self._get_server_str(vbucketid, repl=True)
        persisted = 0
        self.log.info("VbucketId:%s on replicated servers:%s" % (vbucketid, repl_servers))

        while len(repl_servers) >= num > 0:
            for server in repl_servers:
                node = self._get_node(server)
                self.log.info("Replicated Server:- %s" % (server))
                newclient = MemcachedClientHelper.direct_client(node, self.default_bucket_name)
                t_start = datetime.now()
                persisted = 0
                while persisted == 0:
                    opaque, rep_time, persist_time, persisted, cas = newclient.observe(key)
                t_end = datetime.now()
                self.log.info("######key:-%s and Server:- %s#########" % (key, server))
                self.log.info("Persisted:- %s" % (persisted))
                self.log.info("Time taken to persist:- %s" % (t_end - t_start))
                num = num - 1
                if num == 0:
                    break
        return True
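A minimal usage sketch for the helper above (the key name is a placeholder):

    # block until the key is observed on at least one replica and persisted
    self.block_for_replication("key_1", num=1, persist=True)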
Example #29
 def _restart_memcache(self, bucket_name):
     rest = RestConnection(self.master)
     nodes = rest.node_statuses()
     self._kill_nodes(nodes, bucket_name)
     start = time.time()
     memcached_restarted = False
     for server in self.servers[:self.nodes_init]:
         mc = None
         while time.time() - start < 60:
             try:
                 mc = MemcachedClientHelper.direct_client(server, bucket_name)
                 stats = mc.stats()
                 new_uptime = int(stats["uptime"])
                 self.log.info("warmutime%s:%s" % (new_uptime, self.pre_warmup_stats["%s:%s" % (server.ip, server.port)]["uptime"]))
                 if new_uptime < self.pre_warmup_stats["%s:%s" % (server.ip, server.port)]["uptime"]:
                     self.log.info("memcached restarted...")
                     memcached_restarted = True
                     break;
             except Exception:
                 self.log.error("unable to connect to %s:%s" % (server.ip, server.port))
                 if mc:
                     mc.close()
                 time.sleep(1)
         if not memcached_restarted:
             self.fail("memcached did not start %s:%s" % (server.ip, server.port))
Example #30
    def test_time_sync_threshold_setting(self):

        self.log.info("starting test_time_sync_threshold_setting")

        # bucket is created with lww in base test case using the LWW parameter

        # get the stats
        client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD, ahead_threshold
            ),
        )
        # change the setting and verify it is per the new setting - this may or may not be supported

        shell = RemoteMachineShellConnection(self.servers[0])
        output, error = shell.execute_cbepctl(
            self.buckets[0],
            "",
            "set vbucket_param",
            "hlc_drift_ahead_threshold_us ",
            str(LWWStatsTests.DEFAULT_THRESHOLD / 2) + LWWStatsTests.DUMMY_VBUCKET,
        )
        if len(error) > 0:
            self.fail("Failed to set the drift counter threshold, please check the logs.")

        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD / 2,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD / 2, ahead_threshold
            ),
        )
Example #31
 def verify_data(server,
                 keys,
                 value_equal_to_key,
                 verify_flags,
                 test,
                 debug=False,
                 bucket="default"):
     log_error_count = 0
     # verify all the keys
     log = logger.get("infra")
     client = MemcachedClientHelper.direct_client(server, bucket)
     vbucket_count = len(BucketHelper(server).get_vbuckets(bucket))
     # populate key
     index = 0
     all_verified = True
     keys_failed = []
     for key in keys:
         try:
             index += 1
             vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
             client.vbucketId = vbucketId
             flag, keyx, value = client.get(key=key)
             if value_equal_to_key:
                 test.assertEquals(value, key, msg="values don't match")
             if verify_flags:
                 actual_flag = socket.ntohl(flag)
                 expected_flag = ctypes.c_uint32(zlib.adler32(value)).value
                 test.assertEquals(actual_flag,
                                   expected_flag,
                                   msg="flags don't match")
             if debug:
                 log.info("verified key #{0} : {1}".format(index, key))
         except mc_bin_client.MemcachedError as error:
             if debug:
                 log_error_count += 1
                 if log_error_count < 100:
                     log.error(error)
                     log.error(
                         "memcachedError : {0} - unable to get a pre-inserted key : {1}"
                         .format(error.status, key))
             keys_failed.append(key)
             all_verified = False
     client.close()
     if len(keys_failed) > 0:
         log.error('unable to verify #{0} keys'.format(len(keys_failed)))
     return all_verified
Example #32
 def set_test(self, key, exp, flags, values):
     serverInfo = self.master
     client = MemcachedClientHelper.proxy_client(serverInfo, self.bucket_name)
     for v in values:
         for f in flags:
             client.set(key, exp, f, v)
             flags_v, cas_v, get_v = client.get(key)
             if get_v == v:
                 if flags_v == f:
                     self.log.info('Flags set to {0}; get returned {1}'.format(f, flags_v))
                 else:
                     self.test.fail('FAILED. Flags set to {0}; get returned {1}'.format(f, flags_v))
                 self.log.info('Value set to {0}; get returned {1}'.format(v, get_v))
             else:
                 self.test.fail('FAILED. Value set to {0}; get returned {1}'.format(v, get_v))
Example #33
    def keys_exist_or_assert(keys, server, bucket_name, test, queue=None):
        # retry verification up to five times
        log = logger.Logger.get_logger()
        # verify all the keys
        client = MemcachedClientHelper.proxy_client(server, bucket_name)
        retry = 1

        keys_left_to_verify = []
        keys_left_to_verify.extend(copy.deepcopy(keys))
        log_count = 0
        while retry < 6 and len(keys_left_to_verify) > 0:
            msg = "trying to verify {0} keys - attempt #{1} : {2} keys left to verify"
            log.info(msg.format(len(keys), retry, len(keys_left_to_verify)))
            keys_not_verified = []
            for key in keys_left_to_verify:
                try:
                    client.get(key=key)
                except mc_bin_client.MemcachedError as error:
                    keys_not_verified.append(key)
                    if log_count < 100:
                        log.error("key {0} does not exist because {1}".format(
                            key, error))
                        log_count += 1
            retry += 1
            keys_left_to_verify = keys_not_verified
        if len(keys_left_to_verify) > 0:
            log_count = 0
            for key in keys_left_to_verify:
                log.error("key {0} not found".format(key))
                log_count += 1
                if log_count > 100:
                    break
            msg = "unable to verify {0} keys".format(len(keys_left_to_verify))
            log.error(msg)
            if test:
                test.fail(msg=msg)
            if queue is None:
                return False
            queue.put(False)
            return False
        log.info("verified that {0} keys exist".format(len(keys)))
        if queue is None:
            return True
        queue.put(True)
        return True
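The queue parameter exists so the helper can report its result when run on a worker thread, where a plain return value would be lost. A hedged usage sketch, assuming the helper is a static method on BucketOperationHelper as the other helpers in this suite appear to be (Python 2 Queue module):

    from Queue import Queue
    from threading import Thread

    result_queue = Queue()
    verifier = Thread(target=BucketOperationHelper.keys_exist_or_assert,
                      args=(keys, server, "default", None, result_queue))
    verifier.start()
    verifier.join()
    assert result_queue.get() is True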
Example #34
    def _test_backup_and_restore_from_to_different_buckets(self):
        bucket_before_backup = "bucket_before_backup"
        bucket_after_backup = "bucket_after_backup"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket_before_backup,
                                                                                             ram_load_ratio=20,
                                                                                             value_size_distribution=distribution,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
        self.assertTrue(ready, "not all items persisted. see logs")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
            shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212, debug=False,
                                                          bucket=bucket_after_backup), "Missing keys")
Example #35
 def _load_data(self, master, load_ratio):
     log = logger.Logger.get_logger()
     if load_ratio == -1:
         # default to a small ram-load ratio of 0.1
         load_ratio = 0.1
     distribution = {1024: 0.5, 20: 0.5}
     #TODO: with write_only = False, sometimes the load hangs, debug this
     inserted_keys, rejected_keys = \
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           ram_load_ratio=load_ratio,
                                                           number_of_threads=1,
                                                           value_size_distribution=distribution,
                                                           write_only=True)
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats(master, "default", 'ep_queue_size', 0)
     RebalanceHelper.wait_for_stats(master, "default", 'ep_flusher_todo', 0)
     return inserted_keys
Example #36
    def getr_dgm_test(self):
        resident_ratio = self.input.param("resident_ratio", 50)
        gens = []
        delta_items = 200000
        self.num_items = 0
        mc = MemcachedClientHelper.direct_client(self.master,
                                                 self.default_bucket_name)

        self.log.info("LOAD PHASE")
        end_time = time.time() + self.wait_timeout * 30
        while (int(mc.stats()["vb_active_perc_mem_resident"]) == 0 or\
               int(mc.stats()["vb_active_perc_mem_resident"]) > resident_ratio) and\
              time.time() < end_time:
            self.log.info("Resident ratio is %s" %
                          mc.stats()["vb_active_perc_mem_resident"])
            gen = DocumentGenerator('test_docs',
                                    '{{"age": {0}}}',
                                    xrange(5),
                                    start=self.num_items,
                                    end=(self.num_items + delta_items))
            gens.append(copy.deepcopy(gen))
            self._load_all_buckets(self.master,
                                   gen,
                                   'create',
                                   self.expiration,
                                   kv_store=1,
                                   flag=self.flags,
                                   only_store_hash=False,
                                   batch_size=1)
            self.num_items += delta_items
            self.log.info("Resident ratio is %s" %
                          mc.stats()["vb_active_perc_mem_resident"])
        self.assertTrue(
            int(mc.stats()["vb_active_perc_mem_resident"]) < resident_ratio,
            "Resident ratio is not reached")
        self.verify_cluster_stats(self.servers[:self.nodes_init],
                                  only_store_hash=False,
                                  batch_size=1)
        self.log.info("Currently loaded items: %s" % self.num_items)

        self.log.info("READ REPLICA PHASE")
        self.verify_cluster_stats(self.servers[:self.nodes_init],
                                  only_store_hash=False,
                                  replica_to_read=self.replica_to_read,
                                  batch_size=1)
Example #37
    def collect_compare_dcp_stats(self,
                                  buckets,
                                  servers,
                                  perNode=True,
                                  stat_name='unacked_bytes',
                                  compare_value=0,
                                  flow_control_buffer_size=20971520,
                                  filter_list=None):
        """
            Method to extract DCP stats given by the cbstats tool

            Parameters:
              buckets: bucket information
              servers: server information
              stat_name: stat we are searching to compare
              compare_value: the comparison value to be satisfied

            Returns:
              map of bucket informing if stat matching was
              satisfied / not satisfied

            example:: unacked_bytes in dcp
        """
        filter_list = filter_list or []
        bucketMap = dict()
        for bucket in buckets:
            bucketMap[bucket.name] = True
        for bucket in buckets:
            for server in servers:
                client = MemcachedClientHelper.direct_client(server, bucket)
                stats = client.stats('dcp')
                for key in stats.keys():
                    do_filter = False
                    if stat_name in key:
                        for filter_key in filter_list:
                            if filter_key in key:
                                do_filter = True
                        value = int(stats[key])
                        if not do_filter:
                            if value != compare_value:
                                if "eq_dcpq:mapreduce_view" in key:
                                    if value >= flow_control_buffer_size:
                                        bucketMap[bucket.name] = False
                                else:
                                    bucketMap[bucket.name] = False
        return bucketMap
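A brief sketch of acting on the returned map (the assertion itself is illustrative):

    result = self.collect_compare_dcp_stats(buckets, servers,
                                            stat_name='unacked_bytes',
                                            compare_value=0)
    # every bucket should have satisfied the comparison
    assert all(result.values()), "unacked_bytes mismatch: %s" % result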
Example #38
 def _getl_body(self, prefix, getl_timeout, expiration):
     node = self.memcapableTestBase.master
     mc = MemcachedClientHelper.direct_client(node, "default")
     key = "{0}_{1}".format(prefix, str(uuid.uuid4()))
     self.log.info("setting key {0} with expiration {1}".format(key, expiration))
     mc.set(key, expiration, 0, key)
     self.log.info("getl key {0} timeout {1}".format(key, getl_timeout))
     try:
         mc.getl(key, getl_timeout)
     except Exception as ex:
         if getl_timeout < 0:
             print ex
         else:
             raise
     self.log.info("get key {0} which is locked now".format(key))
     flags_v, cas_v, get_v = mc.get(key)
     self.assertEquals(get_v, key)
     i = 0
     while i < 40:
         self.log.info("setting key {0} with new value {1}".format(key, '*'))
         try:
             mc.set(key, 0, 0, '*')
             break
         except Exception as ex:
             print ex
         time.sleep(1)
         print i
         i += 1
     if getl_timeout > 30:
         self.log.info("sleep for {0} seconds".format(30))
         time.sleep(30)
     elif getl_timeout > 0:
         self.log.info("sleep for {0} seconds".format(getl_timeout))
         time.sleep(getl_timeout)
     else:
         self.log.info("sleep for {0} seconds".format(15))
         time.sleep(15)
     self.log.info("lock should have timed out by now; try to set the item again")
     new_value = "*"
     self.log.info("setting key {0} with new value {1}".format(key, new_value))
     mc.set(key, 0, 0, new_value)
     self.log.info("get key {0}".format(key))
     flags_v, cas_v, get_v = mc.get(key)
     self.assertEquals(get_v, "*")
Example #39
 def _get_items(self, item_count, prefix, vprefix=""):
     client = MemcachedClientHelper.proxy_client(self.master, self.default_bucket_name)
     time_start = time.time()
     get_count = 0
     last_error = ""
     error_count = 0
     for i in range(item_count):
         try:
             value = client.get(prefix + "_key_" + str(i))[2]
             assert(value == vprefix + "_value_" + str(i))
             get_count += 1
         except Exception as e:
             last_error = "failed to getr key {0}, error: {1}".format(prefix + "_key_" + str(i), e)
             error_count += 1
     if error_count > 0:
         self.log.error("got {0} errors, last error: {1}".format(error_count, last_error))
     self.log.info("got {0} replica items in {1} seconds".format(get_count, time.time() - time_start))
     return get_count
Example #40
    def print_taps_from_all_nodes(rest, bucket='default'):
        #get the port number from rest ?

        log = logger.Logger.get_logger()
        nodes_for_stats = rest.get_nodes()
        for node_for_stat in nodes_for_stats:
            try:
                client = MemcachedClientHelper.direct_client(
                    node_for_stat, bucket)
                log.info("getting tap stats... for {0}".format(
                    node_for_stat.ip))
                tap_stats = client.stats('tap')
                if tap_stats:
                    RebalanceHelper.log_interesting_taps(
                        node_for_stat, tap_stats, log)
                client.close()
            except Exception as ex:
                log.error("error {0} while getting stats...".format(ex))
Example #41
 def _update_keys(self, version):
     rejected_keys = []
     # quit after updating at most 10,000 keys
     self.updated_keys = []
     moxi = MemcachedClientHelper.proxy_client(self.servers[0], self.bucket_name)
     for key in self.keys:
         if len(self.updated_keys) > 10000:
             break
         value = '{0}'.format(version)
         try:
             moxi.append(key, value)
             self.updated_keys.append(key)
         except MemcachedError:
             rejected_keys.append(key)
     if len(rejected_keys) > 0:
         self.log.error("unable to update {0} keys".format(len(rejected_keys)))
Example #42
 def load_data(master, bucket, keys_count=-1, load_ratio=-1, delete_ratio=0, expiry_ratio=0, test=None):
     log = logger.Logger.get_logger()
     inserted_keys, rejected_keys = \
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
         name=bucket,
         ram_load_ratio=load_ratio,
         number_of_items=keys_count,
         number_of_threads=2,
         write_only=True,
         delete_ratio=delete_ratio,
         expiry_ratio=expiry_ratio,
         moxi=True)
     log.info("wait until data is completely persisted on the disk")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0, timeout_in_seconds=120)
     test.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0, timeout_in_seconds=120)
     test.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     return inserted_keys
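
A minimal usage sketch for the helper above (hedged: the `master` server object, the bucket name, and the calling test instance are assumptions for illustration):

    # Hedged usage sketch for load_data: insert 1,000 items into the
    # default bucket, wait for persistence, and check keys came back.
    # `master` is a server object and `self` a unittest.TestCase.
    inserted_keys = load_data(master, "default", keys_count=1000, test=self)
    self.assertTrue(len(inserted_keys) > 0)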
Example #43
 def decr_test(self, key, exp, flags, value, incr_amt, decr_amt, decr_time):
     global update_value
     serverInfo = self.master
     client = MemcachedClientHelper.proxy_client(serverInfo, self.bucket_name)
     if key != 'no_key':
         client.set(key, exp, flags, value)
     if exp:
         self.log.info('Wait {0} seconds for the key to expire'.format(exp + 2))
         time.sleep(exp + 2)
     if incr_amt:
         c, d = client.incr(key, incr_amt)
         self.log.info('incr amt {0}'.format(c))
     i = 0
     while i < decr_time:
         update_value, cas = client.decr(key, decr_amt)
         i += 1
     self.log.info('decr {0} times with value {1}'.format(decr_time, decr_amt))
     return update_value
Example #44
 def flushctl_set_per_node(server, key, val, bucket='default'):
     log = logger.Logger.get_logger()
     rest = RestConnection(server)
     node = rest.get_nodes_self()
     mc = MemcachedClientHelper.direct_client(server, bucket)
     log.info("Setting flush param on server {0}, {1} to {2} on {3}".format(
         server, key, val, bucket))
     # Workaround for CBQE-249, ideally this should be node.version
     index_path = node.storage[0].get_index_path()
     if index_path == '':
         # An empty index path indicates a pre-2.0 build
         rv = mc.set_flush_param(key, str(val))
     else:
         param_type = ClusterOperationHelper._get_engine_param_type(key)
         rv = mc.set_param(key, str(val), param_type)
     log.info("Setting flush param on server {0}, {1} to {2}, result: {3}".
              format(server, key, val, rv))
     mc.close()
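
A hedged usage sketch for the helper above; the engine parameter name is borrowed from the exp_pager_stime example later in this list, and `server` is assumed to be a TestInputServer:

    # Hedged usage sketch: set the expiry pager interval to 30 seconds
    # on a single node via the flushctl helper defined above.
    flushctl_set_per_node(server, "exp_pager_stime", 30, bucket="default")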
Example #45
    def _restart_memcache(self, bucket_name):
        rest = RestConnection(self.master)
        nodes = rest.node_statuses()
        is_partial = self.input.param("is_partial", "True")
        _nodes = []
        if len(self.servers) > 1:
            skip = 2
        else:
            skip = 1
        if is_partial:
            _nodes = nodes[:len(nodes):skip]
        else:
            _nodes = nodes

        _servers = []
        for server in self.servers:
            for _node in _nodes:
                if server.ip == _node.ip:
                    _servers.append(server)

        self._kill_nodes(_nodes, _servers, bucket_name)

        start = time.time()
        memcached_restarted = False

        for server in _servers:
            while time.time() - start < (self.wait_timeout * 2):
                mc = None
                try:
                    mc = MemcachedClientHelper.direct_client(server, bucket_name)
                    stats = mc.stats()
                    new_uptime = int(stats["uptime"])
                    self.log.info("New warmup uptime %s:%s" % (new_uptime, self.pre_warmup_stats[bucket_name]["%s:%s" % (server.ip, server.port)]["uptime"]))
                    if new_uptime < self.pre_warmup_stats[bucket_name]["%s:%s" % (server.ip, server.port)]["uptime"]:
                        self.log.info("memcached restarted...")
                        memcached_restarted = True
                        break
                except Exception:
                    self.log.error("unable to connect to %s:%s for bucket %s" % (server.ip, server.port, bucket_name))
                    if mc:
                        mc.close()
                    time.sleep(5)
            if not memcached_restarted:
                self.fail("memcached did not start %s:%s for bucket %s" % (server.ip, server.port, bucket_name))
Example #46
 def set_expiry_pager_sleep_time(master, bucket, value=30):
     log = logger.get("infra")
     rest = RestConnection(master)
     servers = rest.get_nodes()
     for server in servers:
         # Not bucket specific, so no need to pass in the bucket_name
         log.info("Connecting to memcached %s:%s" %
                  (server.ip, server.memcached))
         mc = MemcachedClientHelper.direct_client(server, bucket)
         log.info("Set exp_pager_stime flush param on server %s:%s" %
                  (server.ip, server.port))
         try:
             mc.set_flush_param("exp_pager_stime", str(value))
             log.info("Set exp_pager_stime flush param on server %s:%s" %
                      (server.ip, server.port))
         except Exception as ex:
             log.error("Unable to set exp_pager_stime flush param on %s:%s"
                       "\n Exception: %s" %
                       (server.ip, server.memcached, ex))
Example #47
    def get_stats(servers, bucket, stat_param, stat):
        """Gets stats for a specific key from a list of servers

        Parameters:
            servers - A list of servers to get a stat from. ([TestInputServer])
            bucket - The name of the bucket to get a stat from. (String)
            stat_param - The parameter for the stats call, eg. 'checkpoint'. (String)
            stat - The name of the stat to get. (String)

        Returns:
            Dictionary - The key is the test input server and the value is the
            result of the stat passed in as a parameter.
        """
        result = {}
        for server in servers:
            client = MemcachedClientHelper.direct_client(server, bucket)
            stats = client.stats(stat_param)
            result[server] = stats[stat]
        return result
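
Following the docstring above, a usage sketch might look like this (hedged: the empty stat group and the curr_items stat mirror how other examples in this list call client.stats):

    # Hedged usage sketch for get_stats: read curr_items from every node.
    # An empty stat_param asks for the default (all) stats group.
    per_node = get_stats(servers, "default", "", "curr_items")
    for server, value in per_node.items():
        print("{0}: curr_items = {1}".format(server.ip, value))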
Example #48
    def test_logical_clock_ticks(self):

        self.log.info('starting test_logical_clock_ticks')

        payload = "name={0}&roles=admin&password=password".format(
            self.buckets[0].name)
        self.rest.add_set_builtin_user(self.buckets[0].name, payload)
        sdk_client = SDKClient(scheme='couchbase', hosts=[self.servers[0].ip], bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])


        # do a bunch of mutations to set the max cas
        gen_load = BlobGenerator('key-for-cas-test-logical-ticks', 'value-for-cas-test-', self.value_size, end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        vbucket_stats = mc_client.stats('vbucket-details')
        base_total_logical_clock_ticks = 0
        for i in range(self.vbuckets):
            base_total_logical_clock_ticks += int(vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'])
        self.log.info('The base total logical clock ticks is {0}'.format(base_total_logical_clock_ticks))

        # move the system clock back so the logical counter part of HLC is used and the logical clock ticks
        # stat is incremented
        self.assertTrue(shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS), 'Failed to move the system clock back')

        # do more mutations
        NUMBER_OF_MUTATIONS = 10000
        gen_load = BlobGenerator('key-for-cas-test-logical-ticks', 'value-for-cas-test-', self.value_size, end=NUMBER_OF_MUTATIONS)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        # give the cluster a moment to settle, then collect the stats
        time.sleep(30)
        vbucket_stats = mc_client.stats('vbucket-details')
        total_logical_clock_ticks = 0
        for i in range(self.vbuckets):
            total_logical_clock_ticks += int(vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'])

        self.log.info('The total logical clock ticks is {0}'.format(total_logical_clock_ticks))

        self.assertEqual(total_logical_clock_ticks - base_total_logical_clock_ticks, NUMBER_OF_MUTATIONS,
                         'Expected clock ticks {0} actual {1}'.format(NUMBER_OF_MUTATIONS,
                                                                      total_logical_clock_ticks - base_total_logical_clock_ticks))
Example #49
 def load_data(self, master, bucket, keys_count):
     log = logger.Logger.get_logger()
     inserted_keys_cnt = 0
     while inserted_keys_cnt < keys_count:
         keys_cnt, rejected_keys_cnt =\
         MemcachedClientHelper.load_bucket(servers=[master],
             name=bucket,
             number_of_items=keys_count,
             number_of_threads=5,
             write_only=True)
         inserted_keys_cnt += keys_cnt
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size',
                                           0)
     RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                           'ep_flusher_todo', 0)
     return inserted_keys_cnt
Example #50
    def delete_docs(self, num_of_docs, prefix):
        moxi = MemcachedClientHelper.proxy_client(self.master, self.bucket)
        doc_names = []
        for i in range(0, num_of_docs):
            key = "{0}-{1}".format(prefix, i)
            try:
                moxi.delete(key)
            except MemcachedError as e:
                # Don't care if we try to delete a document that doesn't exist
                if e.status == memcacheConstants.ERR_NOT_FOUND:
                    continue
                else:
                    raise
            doc_names.append(key)

        self.wait_for_persistence(180)

        self.log.info("deleted {0} json documents".format(len(doc_names)))
        return doc_names
Example #51
 def keys_dont_exist(server, keys, bucket):
     log = logger.Logger.get_logger()
     #verify all the keys
     client = MemcachedClientHelper.direct_client(server, bucket)
     vbucket_count = len(BucketHelper(server).get_vbuckets(bucket))
     #populate key
     for key in keys:
         try:
             vbucketId = crc32.crc32_hash(key) & (vbucket_count - 1)
             client.vbucketId = vbucketId
             client.get(key=key)
             client.close()
             log.error('key {0} should not exist in the bucket'.format(key))
             return False
         except mc_bin_client.MemcachedError as error:
             log.error(error)
             log.error("expected memcachedError : {0} - unable to get a pre-inserted key : {1}".format(error.status, key))
     client.close()
     return True
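
The key-to-vbucket mapping above is a CRC32-style hash masked down by the vbucket count; a standalone sketch (zlib.crc32 is a stand-in assumption for the helper's crc32.crc32_hash):

    # Standalone sketch of the vbucket mapping used above. The bitmask
    # only works when vbucket_count is a power of two (1024 is typical).
    import zlib

    def vbucket_for_key(key, vbucket_count=1024):
        return zlib.crc32(key.encode()) & (vbucket_count - 1)

    print(vbucket_for_key("some-key"))  # a number in [0, 1023]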
Example #52
 def _verify_direct_client_stats(self, bucket, command, output):
     mc_conn = MemcachedClientHelper.direct_client(self.master, bucket.name,
                                                   self.timeout)
     for line in output:
         stats = line.rsplit(":", 1)
         self.log.info("CbStats###### for %s:::%s==%s" %
                       (stats[0], mc_conn.stats(command)[stats[0].strip()],
                        stats[1].strip()))
         if stats[1].strip() == mc_conn.stats(command)[stats[0].strip()]:
             continue
         else:
             if stats[0].find('tcmalloc') != -1 or stats[0].find('bytes') != -1 or\
             stats[0].find('mem_used') != -1:
                 self.log.warning(
                     "Stat didn't match, but it can change at runtime; not a bug")
                 continue
             raise Exception(
                 "Command does not report an error, but the cbstats value does not match."
             )
Example #53
    def _wait_warmup_completed(self, servers, bucket_name, wait_time=300):
        warmed_up = False
        log = logger.Logger.get_logger()
        for server in servers:
            mc = None
            start = time.time()
            # Try to get the stats for up to wait_time seconds, else bail out.
            while time.time() - start < wait_time:
                # Get the warmup status for each server
                try:
                    mc = MemcachedClientHelper.direct_client(server, bucket_name)
                    stats = mc.stats()
                    if stats is not None and 'ep_warmup_thread' in stats and stats['ep_warmup_thread'] == 'complete':
                        break
                    else:
                        log.info(" Did not get the stats from the server yet, trying again.....")
                        time.sleep(2)
                except Exception as e:
                    log.error(
                        "Could not get ep_warmup_time stats from server %s:%s, exception %s" %
                             (server.ip, server.port, e))
            else:
                self.fail(
                    "Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds." % (
                        server.ip, server.port, wait_time))

            # Waiting for warm-up
            start = time.time()
            warmed_up = False
            while time.time() - start < wait_time and not warmed_up:
                if mc.stats()["ep_warmup_thread"] == "complete":
                    log.info("warmup completed, awesome!!! Warmed up. %s items " % (mc.stats()["curr_items_tot"]))
                    warmed_up = True
                    continue
                elif mc.stats()["ep_warmup_thread"] == "running":
                    log.info("still warming up... curr_items_tot: %s" % (mc.stats()["curr_items_tot"]))
                else:
                    self.fail("Value of ep warmup thread does not exist, exiting from this server")
                time.sleep(5)
            mc.close()
        return warmed_up
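
Note the while/else near the top of the function: the else branch runs only when the while condition goes false without a break having fired. A minimal illustration:

    # Minimal illustration of Python's while/else, as used above: the
    # else clause fires only when the loop ends without hitting `break`.
    attempts = 0
    while attempts < 3:
        attempts += 1
    else:
        print("loop finished without break, so the else branch runs")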
Example #54
    def data_ops_with_moxi(self, server, data_op, buckets, items, use_ascii):

        for bucket in buckets:
            try:
                client = MemcachedClientHelper.proxy_client(server, bucket.name, force_ascii=use_ascii)
            except Exception as ex:
                self.log.error("unable to create memcached client due to {0}..".format(ex))

        with self.assertRaises(MemcachedError) as exp:
            for itr in range(items):
                key = 'bucketflush' + str(itr)
                value = 'bucketflush-' + str(itr)
                if data_op in ["create", "update"]:
                    client.set(key, 0, 0, value)
                elif data_op == "delete":
                    client.delete(key)

        memcached_exception = exp.exception
        self.assertEqual(memcached_exception.status, 134, msg="Unexpected Exception - {0}".format(memcached_exception))
        self.log.info("Expected Exception Caught - {0}".format(memcached_exception))
Example #55
    def memory_quota_default_bucket(self):
        resident_ratio = self.input.param("resident_ratio", 50)
        delta_items = 200000
        mc = MemcachedClientHelper.direct_client(self.master, self.default_bucket_name)

        self.log.info("LOAD PHASE")
        end_time = time.time() + self.wait_timeout * 30
        while (int(mc.stats()["vb_active_perc_mem_resident"]) == 0 or\
               int(mc.stats()["vb_active_perc_mem_resident"]) > resident_ratio) and\
              time.time() < end_time:
            self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
            gen = DocumentGenerator('test_docs', '{{"age": {0}}}', range(5),
                                    start=self.num_items, end=(self.num_items + delta_items))
            self._load_all_buckets(self.master, gen, 'create', 0)
            self.num_items += delta_items
            self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
        memory_mb = int(mc.stats("memory")["total_allocated_bytes"]) // (1024 * 1024)
        self.log.info("total_allocated_bytes is %s" % memory_mb)
        self.assertTrue(memory_mb <= self.quota, "total_allocated_bytes %s should be within %s" % (
                                                  memory_mb, self.quota))
Example #56
 def load_data(self, master, bucket, keys_count):
     inserted_keys_cnt = 0
     repeat_count = 0
     while inserted_keys_cnt < keys_count and repeat_count < 5:
         keys_cnt, rejected_keys_cnt = \
         MemcachedClientHelper.load_bucket(servers=[master],
             name=bucket,
             number_of_items=keys_count,
             number_of_threads=5,
             write_only=True)
         inserted_keys_cnt += keys_cnt
         if keys_cnt == 0:
             repeat_count += 1
         else:
             repeat_count = 0
     if repeat_count == 5:
         log.exception("impossible to load data")
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_persistence(master, bucket)
     return inserted_keys_cnt
Example #57
    def _unsupported_replicas(self, replica):
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 102400)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.get(k)
            try:
                mc.sync_replication([{"key": k, "vbucket": vBucket}], replica)
                msg = "server did not raise an error when running sync_replication with {0} replicas"
                self.fail(msg.format(replica))
            except MemcachedError as error:
                self.log.info("error {0} {1} as expected".format(
                    error.status, error.msg))

        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)
Example #58
 def _stats_befor_warmup(self, bucket_name):
     self.pre_warmup_stats[bucket_name] = {}
     self.stats_monitor = self.input.param("stats_monitor", "")
     self.warmup_stats_monitor = self.input.param("warmup_stats_monitor", "")
     if self.stats_monitor != '':
         self.stats_monitor = self.stats_monitor.split(";")
     if self.warmup_stats_monitor != '':
         self.warmup_stats_monitor = self.warmup_stats_monitor.split(";")
     for server in self.servers:
         mc_conn = MemcachedClientHelper.direct_client(server, bucket_name, self.timeout)
         self.pre_warmup_stats[bucket_name]["%s:%s" % (server.ip, server.port)] = {}
         self.pre_warmup_stats[bucket_name]["%s:%s" % (server.ip, server.port)]["uptime"] = mc_conn.stats("")["uptime"]
         self.pre_warmup_stats[bucket_name]["%s:%s" % (server.ip, server.port)]["curr_items_tot"] = mc_conn.stats("")["curr_items_tot"]
         self.pre_warmup_stats[bucket_name]["%s:%s" % (server.ip, server.port)]["curr_items"] = mc_conn.stats("")["curr_items"]
         for stat_to_monitor in self.stats_monitor:
             self.pre_warmup_stats[bucket_name]["%s:%s" % (server.ip, server.port)][stat_to_monitor] = mc_conn.stats('')[stat_to_monitor]
         if self.without_access_log:
             for stat_to_monitor in self.warmup_stats_monitor:
                 self.pre_warmup_stats[bucket_name]["%s:%s" % (server.ip, server.port)][stat_to_monitor] = mc_conn.stats('warmup')[stat_to_monitor]
         mc_conn.close()
Example #59
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'
        self.bucket_storage = TestInputSingleton.input.param("bucket_storage", 'couchstore')
        serverInfo = self.master

        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        
        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        try:
            rest.create_bucket(bucket=self._bucket_name,
                               ramQuotaMB=bucket_ram,
                               proxyPort=info.memcached,
                               storageBackend=self.bucket_storage)
        except Exception as ex:
            self.log.info(ex)

        if (testconstants.TESTRUNNER_CLIENT in list(os.environ.keys())) and os.environ[testconstants.TESTRUNNER_CLIENT] == testconstants.PYTHON_SDK:
            self.client = SDKSmartClient(serverInfo, self._bucket_name, compression=TestInputSingleton.input.param(
                "sdk_compression", True))
        else:
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
            self.assertTrue(ready, "wait_for_memcached failed")
            self.client = MemcachedClientHelper.direct_client(serverInfo, self._bucket_name)

        self._log_start()
Example #60
 def _verify_direct_client_stats(self, bucket, command, output):
     mc_conn = MemcachedClientHelper.direct_client(self.master, bucket.name,
                                                   self.timeout)
     for line in output:
         stats = line.rsplit(":", 1)
         if "c:/" in line:
             stats = line.split(":", 1)
         collect_stats = ""
         commands = ["hash", "tapagg"]
         if command in commands:
             output, error = self.shell.execute_cbstats(bucket, command)
             d = {}
             if len(output) > 0:
                 d = dict(s.strip().rsplit(':', 1) for s in output)
                 collect_stats = d[stats[0].strip()].strip()
             else:
                 raise Exception("Command does not throw out error message \
                                  but cbstats gives no output. \
                                  Please check the output manually")
         else:
             try:
                 if "magma" in stats[0]:
                     """ skip to check magma stats.  Will check in other test """
                     continue
                 collect_stats = mc_conn.stats(command)[stats[0].strip()]
             except Exception as e:
                 print("\n Exception error: ", e)
         self.log.info("CbStats###### for {0}:::{1}=={2}" \
                       .format(stats[0].strip(), collect_stats, stats[1].strip()))
         if stats[1].strip() == collect_stats:
             continue
         else:
             if stats[0].find('tcmalloc') != -1 or stats[0].find('bytes') != -1 or \
                stats[0].find('mem_used') != -1 or stats[0].find("allocated") != -1 or \
                stats[0].find('fragmentation_size') != -1 or stats[0].find('mapped') != -1 or \
                stats[0].find('resident') != -1 or stats[0].find('retained') != -1:
                 self.log.warning(
                     "Stat didn't match, but it can be changed, not a bug")
                 continue
             raise Exception("Command does not throw out error message \
                              but cbstats does not match.")