Exemple #1
0
    def iostats(self, nodes, frequency, verbosity=False):

        shells = []
        for node in nodes:
            try:
                bucket = RestConnection(node).get_buckets()[0].name
                MemcachedClientHelper.direct_client(node, bucket)
                shells.append(RemoteMachineShellConnection(node))
            except:
                pass

        self._task["iostats"] = []

        print "started capturing io stats"

        while not self._aborted():
            time.sleep(frequency)
            print "collecting io_stats"
            for shell in shells:
                kB_read, kB_wrtn = self._extract_io_info(shell)
                if kB_read and kB_wrtn:
                    self._task["iostats"].append({"time": time.time(),
                                                 "ip": shell.ip,
                                                 "read": kB_read,
                                                 "write": kB_wrtn})
        print "finished capturing io stats"
Exemple #2
0
    def system_stats(self, nodes, pnames, frequency, verbosity=False):
        shells = []
        for node in nodes:
            try:
                bucket = RestConnection(node).get_buckets()[0].name
                MemcachedClientHelper.direct_client(node, bucket)
                shells.append(RemoteMachineShellConnection(node))
            except:
                pass
        d = {"snapshots": []}
        #        "pname":"x","pid":"y","snapshots":[{"time":time,"value":value}]

        start_time = str(self._task["time"])
        while not self._aborted():
            time.sleep(frequency)
            current_time = time.time()
            i = 0
            for shell in shells:
                node = nodes[i]
                unique_id = node.ip+'-'+start_time
                for pname in pnames:
                    obj = RemoteMachineHelper(shell).is_process_running(pname)
                    if obj and obj.pid:
                        value = self._extract_proc_info(shell, obj.pid)
                        value["name"] = pname
                        value["id"] = obj.pid
                        value["unique_id"] = unique_id
                        value["time"] = current_time
                        value["ip"] = node.ip
                        d["snapshots"].append(value)
                i +=  1
        self._task["systemstats"] = d["snapshots"]
        print " finished system_stats"
    def test_checkpointing_with_full_rollback(self):
        """Verify a full rollback (startSeqno back to 0) after checkpointing.

        Persistence is stopped on the source nodes, data is loaded and
        replicated, memcached is killed on the master and the second node
        (the only one persisting) is failed over, forcing the DCP stream
        to roll back to 0.  The goxdcr log is then checked for the
        expected rollback messages.
        """
        bucket = self.src_cluster.get_buckets()[0]
        nodes = self.src_cluster.get_nodes()

        # stop persistence on every source node so nothing is flushed
        for node in nodes:
            MemcachedClientHelper.direct_client(node, bucket).stop_persistence()

        self.src_cluster.pause_all_replications()
        load_gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(load_gen)
        self.src_cluster.resume_all_replications()

        # let at least two checkpoint intervals elapse before validating
        self.sleep(self._checkpoint_interval * 2)
        self.get_and_validate_latest_checkpoint()

        # mutate the bucket while replication is running
        self.async_perform_update_delete()
        self.sleep(self._wait_timeout)

        # kill memcached on the master so the second node becomes master
        master_shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
        master_shell.kill_memcached()

        # restart persistence on node B only
        MemcachedClientHelper.direct_client(nodes[1], bucket).start_persistence()

        # fail node B over and wait for failover + rollback to finish
        self.src_cluster.async_failover().result()
        self.sleep(self._wait_timeout * 5)

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'
        expectations = [
            ("Received rollback from DCP stream",
             "full rollback not received from DCP as expected",
             "full rollback received from DCP as expected"),
            ("Rolled back startSeqno to 0",
             "startSeqno not rolled back to 0 as expected",
             "startSeqno rolled back to 0 as expected"),
        ]
        for pattern, fail_msg, ok_msg in expectations:
            matches = NodeHelper.check_goxdcr_log(nodes[0], pattern, goxdcr_log)
            self.assertGreater(matches, 0, fail_msg)
            self.log.info(ok_msg)

        master_shell.disconnect()
Exemple #4
0
    def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
        """Back up and restore a bucket whose keys all carry a TTL, then
        verify that every key has expired after the restore.

        :param replica: replica count for the bucket under test
        """
        bucket = "default"
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        # give the bucket two thirds of the node's memory quota
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        expiry = 60
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, key)
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        client.close()
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
        # wait for the write queues to drain before taking the backup
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        # fix: the failure message previously referred to ep_queue_size
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        node = RestConnection(self.master).get_nodes_self()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)
        backupHelper = BackupHelper(self.master, self)
        backupHelper.backup(bucket, node, self.remote_tmp_folder)

        # recreate the bucket from scratch and restore the backup into it
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        backupHelper.restore(self.remote_tmp_folder)
        # give the server another minute so every restored key's TTL has
        # elapsed before the verification pass below
        time.sleep(60)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        self.log.info('verifying that all those keys have expired...')
        for key in keys:
            try:
                client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.fail(msg.format(expiry, key, expiry))
            except mc_bin_client.MemcachedError as error:
                # status 1 (KEY_ENOENT) means the key expired, as expected
                self.assertEquals(error.status, 1,
                                  msg="expected error code {0} but saw error code {1}".format(1, error.status))
        client.close()
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Exemple #5
0
 def _kill_nodes(self, nodes, servers, bucket_name):
     """Kill memcached on each node via diag/eval, or hard-reboot servers.

     With the ``reboot`` test param set (the default) every server in
     ``servers`` is rebooted and its firewall rules flushed afterwards;
     otherwise memcached is killed on each node in ``nodes``.
     """
     self.reboot = self.input.param("reboot", True)
     if self.reboot:
         for server in servers:
             conn = RemoteMachineShellConnection(server)
             out, err = conn.execute_command("reboot")
             conn.log_command_output(out, err)
             conn.disconnect()
             # give the machine time to come back up
             time.sleep(self.wait_timeout * 8)
             conn = RemoteMachineShellConnection(server)
             out, err = conn.execute_command("/sbin/iptables -F")
             conn.log_command_output(out, err)
             conn.disconnect()
     else:
         for node in nodes:
             credentials = {
                 "ip": node.ip,
                 "port": node.port,
                 "username": self.servers[0].rest_username,
                 "password": self.servers[0].rest_password,
             }
             rest_conn = RestConnection(credentials)
             mc = MemcachedClientHelper.direct_client(credentials, bucket_name)
             self.log.info("restarted the node %s:%s" % (node.ip, node.port))
             # kill the memcached process from inside the erlang VM
             kill_cmd = 'os:cmd("kill -9 {0} ")'.format(mc.stats()["pid"])
             self.log.info(kill_cmd)
             killed = rest_conn.diag_eval(kill_cmd)
             self.log.info("killed ??  {0} ".format(killed))
             mc.close()
Exemple #6
0
    def test_time_sync_threshold_setting(self):
        '''
        @summary: Verify that the drift-ahead threshold can be changed.
        The current ep_hlc_drift_ahead_threshold_us value is read, then
        driftAheadThresholdMs is POSTed over REST and the stat is checked
        again against the halved default.
        '''

        self.log.info('starting test_time_sync_threshold_setting')
        # the bucket was created with LWW enabled by the base test case,
        # so the drift stats are available
        mc = MemcachedClientHelper.direct_client(self.servers[0],
                                                 self.buckets[0])
        observed = int(mc.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(observed == LWWStatsTests.DEFAULT_THRESHOLD,
                        'Ahead threshold mismatch expected: {0} '
                        'actual {1}'.format(LWWStatsTests.DEFAULT_THRESHOLD,
                                            observed))
        # lower the threshold over REST -- this may or may not be supported
        # by the server build under test
        rest_value = str(LWWStatsTests.DEFAULT_THRESHOLD/2000)  # REST takes milliseconds
        cmd = ("curl -X POST -u Administrator:password -d "
               "'driftAheadThresholdMs={0}' http://{1}:8091/pools/default/"
               "buckets/default".format(rest_value, self.servers[0].ip))
        self.log.info("Executing command: %s" % cmd)
        try:
            os.system(cmd)
        except Exception as err:
            self.fail('Exception occurred: %s' % str(err))
        time.sleep(10)
        stats_after = mc.stats()
        observed = int(stats_after["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            observed == LWWStatsTests.DEFAULT_THRESHOLD / 2,
            'Ahead threshold mismatch expected: {0} actual {1}'.format(
                LWWStatsTests.DEFAULT_THRESHOLD / 2, observed))
    def getr_dgm_test(self):
        """Drive the bucket into DGM, then exercise replica reads.

        Loads batches of ``delta_items`` docs until the active resident
        ratio drops below ``resident_ratio`` (test param, default 50) or
        the time budget runs out, then verifies cluster stats reading
        from the replica given by ``self.replica_to_read``.
        """
        resident_ratio = self.input.param("resident_ratio", 50)
        gens = []
        delta_items = 200000
        self.num_items = 0
        mc = MemcachedClientHelper.direct_client(self.master, self.default_bucket_name)

        self.log.info("LOAD PHASE")
        # hard time budget so a bucket that never reaches DGM can't hang the test
        end_time = time.time() + self.wait_timeout * 30
        # resident ratio 0 is treated as "stat not populated yet" and keeps loading
        while (int(mc.stats()["vb_active_perc_mem_resident"]) == 0 or\
               int(mc.stats()["vb_active_perc_mem_resident"]) > resident_ratio) and\
              time.time() < end_time:
            self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
            gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                                    start=self.num_items, end=(self.num_items + delta_items))
            # keep a pristine copy of each generator; loading consumes ``gen``
            gens.append(copy.deepcopy(gen))
            self._load_all_buckets(self.master, gen, 'create', self.expiration, kv_store=1,
                                   flag=self.flags, only_store_hash=False, batch_size=1)
            self.num_items += delta_items
            self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
        self.assertTrue(int(mc.stats()["vb_active_perc_mem_resident"]) < resident_ratio,
                        "Resident ratio is not reached")
        self.verify_cluster_stats(self.servers[:self.nodes_init], only_store_hash=False,
                                  batch_size=1)
        self.log.info("Currently loaded items: %s" % self.num_items)

        self.log.info("READ REPLICA PHASE")
        # re-validate, this time reading from the configured replica
        self.verify_cluster_stats(self.servers[:self.nodes_init], only_store_hash=False,
                                  replica_to_read=self.replica_to_read, batch_size=1)
    def block_for_replication(self, key, cas=0, num=1, timeout=0, persist=False):
        """
        observe a key until it has been replicated to @param num of servers

        @param persist : block until item has been persisted to disk
        """
        vbucketid = self.client._get_vBucket_id(key)
        repl_servers = self._get_server_str(vbucketid, repl=True)
        persisted = 0
        self.log.info("VbucketId:%s on replicated servers:%s" % (vbucketid, repl_servers))

        # ``num`` is incremented once per server observed, so the outer
        # loop terminates once it exceeds the number of replica servers.
        while len(repl_servers) >= num > 0:
            for server in repl_servers:
                node = self._get_node(server)
                self.log.info("Replicated Server:- %s" % (server))
                newclient = MemcachedClientHelper.direct_client(node, self.default_bucket_name)
                t_start = datetime.now()
                # NOTE(review): ``persisted`` is shared across servers, so
                # once the key reports persisted on one replica the busy-wait
                # is skipped for the rest -- confirm this is intended.
                while persisted == 0:
                    opaque, rep_time, persist_time, persisted, cas = newclient.observe(key)
                t_end = datetime.now()
                newclient.close()  # fix: the per-server client was never closed
                self.log.info("######key:-%s and Server:- %s#########" % (key, server))
                self.log.info("Persisted:- %s" % (persisted))
                self.log.info("Time taken to persist:- %s" % (t_end - t_start))
                # fix: removed unreachable "if num == 0: break" -- num starts
                # >= 1 inside the loop and only ever grows
                num = num + 1
        return True
 def _restart_memcache(self, bucket_name):
     """Kill memcached on every node and wait until it restarts everywhere.

     A restart is detected by the node's "uptime" stat dropping below the
     value recorded in ``self.pre_warmup_stats`` before the kill.
     Fails the test if any node does not come back within the window.
     """
     rest = RestConnection(self.master)
     nodes = rest.node_statuses()
     # NOTE(review): elsewhere in this file _kill_nodes takes
     # (nodes, servers, bucket_name) -- confirm this two-argument call
     # matches the _kill_nodes defined in this class.
     self._kill_nodes(nodes, bucket_name)
     # NOTE(review): the 60s timer is shared across ALL servers, so later
     # servers in the loop get less (possibly zero) time to restart.
     start = time.time()
     memcached_restarted = False
     for server in self.servers[:self.nodes_init]:
         mc = None
         while time.time() - start < 60:
             try:
                 mc = MemcachedClientHelper.direct_client(server, bucket_name)
                 stats = mc.stats()
                 new_uptime = int(stats["uptime"])
                 self.log.info("warmutime%s:%s" % (new_uptime, self.pre_warmup_stats["%s:%s" % (server.ip, server.port)]["uptime"]))
                 # uptime lower than the pre-kill snapshot => process restarted
                 if new_uptime < self.pre_warmup_stats["%s:%s" % (server.ip, server.port)]["uptime"]:
                     self.log.info("memcached restarted...")
                     memcached_restarted = True
                     break;
             except Exception:
                 self.log.error("unable to connect to %s:%s" % (server.ip, server.port))
                 if mc:
                     mc.close()
                 time.sleep(1)
         # NOTE(review): memcached_restarted is not reset per server, so a
         # later server that times out is masked by an earlier success.
         if not memcached_restarted:
             self.fail("memcached did not start %s:%s" % (server.ip, server.port))
Exemple #10
0
    def setUp(self):
        """Prepare a single-node cluster with a fresh "default" bucket.

        Cleans up any previous cluster state, initializes the master node
        and its memory quota, creates the cbadminbucket RBAC user with the
        admin role, creates the bucket, builds the test client (Python SDK
        or direct memcached depending on the TESTRUNNER_CLIENT env var)
        and waits until the bucket is ready.
        """
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        master = self.master
        ClusterOperationHelper.cleanup_cluster([master])
        BucketOperationHelper.delete_all_buckets_or_assert([master], self)

        self._bucket_name = 'default'

        rest = RestConnection(master)
        node_info = rest.get_nodes_self()
        self._bucket_port = node_info.moxi
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=node_info.mcdMemoryReserved)
        # give the bucket two thirds of the node quota
        bucket_ram = node_info.memoryQuota * 2 / 3

        # create the built-in cbadminbucket user and grant it the admin role
        RbacBase().create_user_source(
            [{'id': 'cbadminbucket',
              'name': 'cbadminbucket',
              'password': '******'}],
            'builtin', master)
        RbacBase().add_user_role(
            [{'id': 'cbadminbucket',
              'name': 'cbadminbucket',
              'roles': 'admin'}],
            RestConnection(master), 'builtin')

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=node_info.memcached)

        use_sdk = (testconstants.TESTRUNNER_CLIENT in os.environ.keys()
                   and os.environ[testconstants.TESTRUNNER_CLIENT]
                       == testconstants.PYTHON_SDK)
        if use_sdk:
            self.client = SDKSmartClient(
                master,
                self._bucket_name,
                compression=TestInputSingleton.input.param(
                    "sdk_compression", True))
        else:
            self.client = MemcachedClientHelper.direct_client(
                master, self._bucket_name)

        self.assertTrue(
            BucketOperationHelper.wait_for_bucket_creation(
                self._bucket_name, rest),
            msg='create_bucket succeeded but bucket "default" does not exist')
        ready = BucketOperationHelper.wait_for_memcached(
            master, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Exemple #11
0
    def get_stats_memc(self, bucket_name, stat_name="", key=None):
        """
        Fetch stats for a bucket over a direct memcached connection.

        Note: Function calling this API should take care of validating
        the outputs and handling the errors/warnings from execution.

        Arguments:
        :bucket_name - Name of the bucket to get the stats
        :stat_name   - Any valid stat group accepted by the stats command.
                       "all" is normalized to "" (the default group).
        :key         - Specific stat to return.
                       Default=None, means return the whole stats dict.

        Returns:
        :the full stats dict, or the single value for ``key`` if given
        """
        # fix: the old docstring documented a nonexistent "field_to_grep"
        # parameter and claimed a cbstats|grep mechanism this code never used
        if stat_name == "all":
            stat_name = ""
        client = MemcachedClientHelper.direct_client(
            self.server, Bucket({"name": bucket_name}), 30, self.username,
            self.password)
        try:
            output = client.stats(stat_name)
        finally:
            # fix: close the connection even if stats() raises
            client.close()
        return output if key is None else output[key]
Exemple #12
0
 def wait_warmup_completed(self, warmupnodes, bucket_names=None):
     """Wait (up to 150s per node/bucket) for ep_warmup_thread to complete.

     :param warmupnodes: servers that are warming up
     :param bucket_names: bucket name or list of names; defaults to
                          ["default"]
     """
     # fix: was a mutable default argument (bucket_names=["default"])
     if bucket_names is None:
         bucket_names = ["default"]
     if isinstance(bucket_names, str):
         bucket_names = [bucket_names]
     for server in warmupnodes:
         for bucket in bucket_names:
             mc = MemcachedClientHelper.direct_client(server, bucket)
             start = time.time()
             while time.time() - start < 150:
                 if mc.stats()["ep_warmup_thread"] == "complete":
                     self._log.info("Warmed up: %s items " %
                                    (mc.stats()["curr_items_tot"]))
                     time.sleep(10)
                     break
                 elif mc.stats()["ep_warmup_thread"] == "running":
                     self._log.info(
                         "Still warming up .. curr_items_tot : %s" %
                         (mc.stats()["curr_items_tot"]))
                     continue
                 else:
                     self._log.info(
                         "Value of ep_warmup_thread does not exist, exiting from this server"
                     )
                     break
             if mc.stats()["ep_warmup_thread"] == "running":
                 self._log.info(
                     "ERROR: ep_warmup_thread's status not complete")
             # fix: was "mc.close" (bare attribute access, never called)
             mc.close()
Exemple #13
0
    def test_time_sync_threshold_setting(self):
        '''
        @summary: This method checks for the change in drift threshold
        settings. We change the drift_ahead_threshold value using REST
        and then verify it against the retrieved value
        '''

        self.log.info('starting test_time_sync_threshold_setting')
        # bucket is created with lww in base test case using the LWW parameter
        # get the stats
        client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD,
                        'Ahead threshold mismatch expected: {0} '\
                        'actual {1}'.format(LWWStatsTests.DEFAULT_THRESHOLD,
                                            ahead_threshold))
        # change the setting and verify it is per the new setting - this may or may not be supported
        cmd = "curl -X POST -u Administrator:password -d "\
        "'driftAheadThresholdMs={0}' http://{1}:8091/pools/default/"\
        "buckets/default".format(str(LWWStatsTests.DEFAULT_THRESHOLD/2000),#Rest API accepts value in milli-seconds
                                 self.servers[0].ip)
        self.log.info("Executing command: %s"%cmd)
        try:
            os.system(cmd)
        # fix: was Python-2-only "except Exception,err" syntax; the rest of
        # this file uses the portable "except ... as ..." form
        except Exception as err:
            self.fail('Exception occurred: %s'%str(err))
    def memory_quota_default_bucket(self):
        """Load the default bucket into DGM and check memory stays in quota.

        Loads batches of ``delta_items`` docs until the active resident
        ratio drops below ``resident_ratio`` (test param, default 50) or
        the time budget expires, then asserts that mem_used (in MB) does
        not exceed ``self.quota``.
        """
        resident_ratio = self.input.param("resident_ratio", 50)
        delta_items = 200000
        # NOTE(review): self.num_items is read below without being set here --
        # presumably initialized by the base test's setUp; confirm.
        mc = MemcachedClientHelper.direct_client(self.master,
                                                 self.default_bucket_name)

        self.log.info("LOAD PHASE")
        # hard time budget so the loop cannot run forever
        end_time = time.time() + self.wait_timeout * 30
        # a resident ratio of 0 means the stat is not populated yet; keep loading
        while (int(mc.stats()["vb_active_perc_mem_resident"]) == 0 or \
                           int(mc.stats()["vb_active_perc_mem_resident"]) > resident_ratio) and \
                        time.time() < end_time:
            self.log.info("Resident ratio is %s" %
                          mc.stats()["vb_active_perc_mem_resident"])
            gen = DocumentGenerator('test_docs',
                                    '{{"age": {0}}}',
                                    xrange(5),
                                    start=self.num_items,
                                    end=(self.num_items + delta_items))
            self._load_all_buckets(self.master, gen, 'create', 0)
            self.num_items += delta_items
            self.log.info("Resident ratio is %s" %
                          mc.stats()["vb_active_perc_mem_resident"])
        # convert the mem_used stat (bytes) to megabytes for the quota check
        memory_mb = int(mc.stats("memory")["mem_used"]) / (1024 * 1024)
        self.log.info("Memory Used is %s" % memory_mb)
        self.assertTrue(
            memory_mb <= self.quota,
            "Memory Used %s should be within %s" % (memory_mb, self.quota))
Exemple #15
0
 def _load_memcached_bucket(self, server, gen_load, bucket_name):
     """Load every key/value from ``gen_load`` into a memcached bucket.

     Connecting is retried up to 6 times; each set is retried across
     vbucket ids 0..1023 until one is accepted (memcached buckets reject
     sets sent to the wrong vbucket).

     :param server: node to connect to
     :param gen_load: document generator yielding (key, value) pairs
     :param bucket_name: target bucket
     """
     client = None
     num_tries = 0
     while num_tries < 6:
         try:
             num_tries += 1
             client = MemcachedClientHelper.direct_client(
                 server, bucket_name)
             break
         except Exception as ex:
             if num_tries < 5:
                 self.log.info(
                     "unable to create memcached client due to {0}. Try again"
                     .format(ex))
             else:
                 self.log.error(
                     "unable to create memcached client due to {0}.".format(
                         ex))
     if client is None:
         # fix: previously fell through and raised NameError on ``client``,
         # masking the real connection failure
         self.fail("unable to create memcached client to {0}".format(server))
     while gen_load.has_next():
         key, value = gen_load.next()
         for v in xrange(1024):
             try:
                 client.set(key, 0, 0, value, v)
                 break
             except Exception:
                 # wrong vbucket (or transient error): try the next id
                 pass
     client.close()
Exemple #16
0
 def _kill_nodes(self, nodes, servers, bucket_name):
     """Either kill the memcached process on every node (reboot=False) or
     reboot every server and flush its iptables rules afterwards."""
     self.reboot = self.input.param("reboot", True)
     if not self.reboot:
         for node in nodes:
             target = {
                 "ip": node.ip,
                 "port": node.port,
                 "username": self.servers[0].rest_username,
                 "password": self.servers[0].rest_password
             }
             rest = RestConnection(target)
             client = MemcachedClientHelper.direct_client(target, bucket_name)
             self.log.info("restarted the node %s:%s" %
                           (node.ip, node.port))
             memcached_pid = client.stats()["pid"]
             # kill memcached from inside the erlang VM via diag/eval
             eval_cmd = "os:cmd(\"kill -9 {0} \")".format(memcached_pid)
             self.log.info(eval_cmd)
             result = rest.diag_eval(eval_cmd)
             self.log.info("killed ??  {0} ".format(result))
             client.close()
     else:
         for server in servers:
             # reboot, wait for the box to come back, then open the firewall
             for cmd, pause in (("reboot", self.wait_timeout * 8),
                                ("/sbin/iptables -F", 0)):
                 shell = RemoteMachineShellConnection(server)
                 output, error = shell.execute_command(cmd)
                 shell.log_command_output(output, error)
                 shell.disconnect()
                 if pause:
                     time.sleep(pause)
    def collect_vbucket_num_stats(self, servers, buckets):
        """
            Collect active/replica vbucket counts via memcached stats.

            Paramters:

            buckets: bucket information
            servers: server information

            Returns:

            Two maps keyed by bucket name, then server ip:
            ({bucket: {ip: vb_active_num}}, {bucket: {ip: vb_replica_num}})
        """
        # fix: the old docstring described "failover stats", which this
        # method never collected
        active_bucketMap = {}
        replica_bucketMap = {}
        for bucket in buckets:
            active_map_data = {}
            replica_map_data = {}
            for server in servers:
                client = MemcachedClientHelper.direct_client(server, bucket)
                try:
                    stats = client.stats('')
                finally:
                    # fix: the connection was never closed
                    client.close()
                # fix: direct lookups instead of scanning every stat key
                if 'vb_active_num' in stats:
                    active_map_data[server.ip] = int(stats['vb_active_num'])
                if 'vb_replica_num' in stats:
                    replica_map_data[server.ip] = int(stats['vb_replica_num'])
            active_bucketMap[bucket.name] = active_map_data
            replica_bucketMap[bucket.name] = replica_map_data
        return active_bucketMap, replica_bucketMap
 def run(self):
     # Thread entry point: writes ``num_items`` generated keys into the
     # bucket.  ``server``, ``bucket``, ``num_items`` and ``log`` are not
     # attributes of self -- presumably module-level or closure variables
     # bound by the code that spawns this thread (not visible here; confirm).
     client = MemcachedClientHelper.direct_client(server, bucket)
     for i in range(num_items):
         key = "key-{0}".format(i)
         # 7-char random suffix so values differ run to run
         value = "value-{0}".format(str(uuid.uuid4())[:7])
         client.set(key, 0, 0, value, 0)
     log.info("Loaded {0} key".format(num_items))
Exemple #19
0
    def run_test(self):
        """Drive the bucket into DGM, then hammer it with concurrent
        reads plus parallel "stats all" / "stats reset" threads.

        Loads batches of items until mem_used passes ``threshold`` or the
        active resident ratio drops below ``active_resident_threshold``,
        waits for persistence, then repeatedly runs a reader thread
        alongside ``threads_to_run`` stats-all and stats-reset threads
        for 300-second rounds.
        """
        ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
        active_resident_threshold = int(self.input.param("active_resident_threshold", 10))

        mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
        stats = mc.stats()
        # default threshold is the node's current low-watermark stat
        threshold = int(self.input.param("threshold", stats[ep_threshold]))
        threshold_reached = False
        self.num_items = self.input.param("items", 10000)
        self._load_doc_data_all_buckets("create")

        # load items till reached threshold or mem-ratio is less than resident ratio threshold
        while not threshold_reached:
            mem_used = int(mc.stats()["mem_used"])
            if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
                self.log.info(
                    "mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s "
                    % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"])
                )
                items = self.num_items
                self.num_items += self.input.param("items", 10000)
                self._load_doc_data_all_buckets("create", items)
            else:
                threshold_reached = True
                self.log.info("DGM state achieved!!!!")

        # wait for draining of data before restart and warm up
        for bucket in self.buckets:
            RebalanceHelper.wait_for_persistence(self.nodes_server[0], bucket)

        # NOTE(review): this loop has no break/return -- it repeats the
        # 300-second stats-storm rounds indefinitely; presumably the test
        # harness timeout is what ends it. Confirm this is intentional.
        while 1:

            #            read_data_task = self.cluster.async_verify_data(self.master, self.buckets[0], self.buckets[0].kvs[1])

            read_data_task = Thread(target=self._run_get)
            read_data_task.start()
            # 5 threads to run stats all and reset asynchronously
            start = time.time()
            while (time.time() - start) < 300:

                stats_all_thread = []
                stats_reset_thread = []

                for i in xrange(self.threads_to_run):
                    stat_str = ""
                    stats_all_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                    stats_all_thread[i].start()
                    stat_str = "reset"
                    stats_reset_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                    stats_reset_thread[i].start()

                for i in xrange(self.threads_to_run):
                    stats_all_thread[i].join()
                    stats_reset_thread[i].join()

                del stats_all_thread
                del stats_reset_thread

            #            read_data_task.result()
            read_data_task.join()
Exemple #20
0
 def _stats_befor_warmup(self, bucket_name):
     """Snapshot per-server stats for ``bucket_name`` before a warmup.

     Records uptime, curr_items_tot, curr_items plus any stats named in
     the ``stats_monitor`` / ``warmup_stats_monitor`` test params into
     ``self.pre_warmup_stats[bucket_name]["ip:port"]``.
     (Method name typo "befor" is kept for caller compatibility.)
     """
     self.pre_warmup_stats[bucket_name] = {}
     self.stats_monitor = self.input.param("stats_monitor", "")
     self.warmup_stats_monitor = self.input.param("warmup_stats_monitor", "")
     # fix: was 'is not ""' -- an identity comparison that only worked
     # because CPython interns the empty string literal
     if self.stats_monitor != "":
         self.stats_monitor = self.stats_monitor.split(";")
     if self.warmup_stats_monitor != "":
         self.warmup_stats_monitor = self.warmup_stats_monitor.split(";")
     for server in self.servers:
         mc_conn = MemcachedClientHelper.direct_client(server, bucket_name, self.timeout)
         server_key = "%s:%s" % (server.ip, server.port)
         # fetch the stat group once per server instead of once per field
         stats = mc_conn.stats("")
         node_stats = {
             "uptime": stats["uptime"],
             "curr_items_tot": stats["curr_items_tot"],
             "curr_items": stats["curr_items"],
         }
         for stat_to_monitor in self.stats_monitor:
             node_stats[stat_to_monitor] = stats[stat_to_monitor]
         if self.without_access_log and self.warmup_stats_monitor:
             warmup_stats = mc_conn.stats("warmup")
             for stat_to_monitor in self.warmup_stats_monitor:
                 node_stats[stat_to_monitor] = warmup_stats[stat_to_monitor]
         self.pre_warmup_stats[bucket_name][server_key] = node_stats
         mc_conn.close()
Exemple #21
0
    def test_time_sync_threshold_setting(self):
        """Change hlc_drift_ahead_threshold_us via cbepctl and verify it.

        Reads the default drift-ahead threshold from memcached stats,
        halves it through ``cbepctl set vbucket_param`` and asserts the
        stat reflects the new value.
        """
        self.log.info("starting test_time_sync_threshold_setting")

        # the bucket was created with LWW enabled by the base test case,
        # so the drift stats are available
        stats_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        observed = int(stats_client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            observed == LWWStatsTests.DEFAULT_THRESHOLD,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD, observed),
        )

        # halve the threshold via cbepctl -- this may or may not be
        # supported on the server build under test
        shell = RemoteMachineShellConnection(self.servers[0])
        new_value = str(LWWStatsTests.DEFAULT_THRESHOLD / 2) + LWWStatsTests.DUMMY_VBUCKET
        output, error = shell.execute_cbepctl(
            self.buckets[0],
            "",
            "set vbucket_param",
            "hlc_drift_ahead_threshold_us ",
            new_value,
        )
        if len(error) > 0:
            self.fail("Failed to set the drift counter threshold, please check the logs.")

        observed = int(stats_client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            observed == LWWStatsTests.DEFAULT_THRESHOLD / 2,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD / 2, observed),
        )
Exemple #22
0
 def test_expired_keys(self):
     """Insert keys with short TTLs and verify they expire.

     For each TTL in a small set, pushes 500 keys with that expiry,
     waits past the TTL, then confirms every get fails with status 1
     (key not found).
     """
     serverInfo = self.master
     client = MemcachedClientHelper.direct_client(serverInfo, self._bucket_name)
     expirations = [2, 5, 10]
     for expiry in expirations:
         testuuid = uuid.uuid4()
         keys = ["key_%s_%d" % (testuuid, i) for i in range(500)]
         self.log.info("pushing keys with expiry set to {0}".format(expiry))
         for key in keys:
             try:
                 client.set(key, expiry, 0, key)
             except mc_bin_client.MemcachedError as error:
                 msg = "unable to push key : {0} to bucket : {1} error : {2}"
                 self.log.error(msg.format(key, client.vbucketId, error.status))
                 self.fail(msg.format(key, client.vbucketId, error.status))
         self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
         # allow a little slack beyond the TTL for the expiry pager to run
         delay = expiry + 5
         msg = "sleeping for {0} seconds to wait for those items with expiry set to {1} to expire"
         self.log.info(msg.format(delay, expiry))
         time.sleep(delay)
         self.log.info('verifying that all those keys have expired...')
         for key in keys:
             try:
                 client.get(key=key)
                 msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                 self.fail(msg.format(expiry, key, delay))
             except mc_bin_client.MemcachedError as error:
                 # status 1 == key not found; assertEqual replaces the
                 # deprecated assertEquals alias
                 self.assertEqual(error.status, 1,
                                  msg="expected error code {0} but saw error code {1}".format(1, error.status))
         self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
 def _verify_direct_client_stats(self, bucket, command, output):
     """Verify each 'name:value' line in *output* against a live reading.

     For the 'hash' and 'tapagg' stat groups the live value is re-fetched
     through cbstats on the shell; every other group is read directly from
     a memcached client connection.

     :param bucket: bucket object whose stats are being checked
     :param command: cbstats command/group the output came from
     :param output: iterable of 'stat_name:expected_value' lines
     :raises Exception: when a stat mismatches (volatile memory stats are
         exempt) or when cbstats returns no output at all.
     """
     mc_conn = MemcachedClientHelper.direct_client(self.master,
                                                   bucket.name, self.timeout)
     shell_commands = ("hash", "tapagg")
     for line in output:
         # split on the LAST ':' only, so stat names containing ':' survive
         stats = line.rsplit(":", 1)
         stat_name = stats[0].strip()
         expected = stats[1].strip()
         if command in shell_commands:
             # NOTE: do not rebind the *output* parameter here (the original
             # shadowed its own iteration source) - use a fresh local name
             cb_output, error = self.shell.execute_cbstats(bucket, command)
             if not cb_output:
                 raise Exception("Command does not throw out error message "
                                 "but cbstats gives no output. "
                                 "Please check the output manually")
             live = dict(s.strip().rsplit(':', 1) for s in cb_output)
             collect_stats = live[stat_name].strip()
         else:
             collect_stats = mc_conn.stats(command)[stat_name]
         self.log.info("CbStats###### for {0}:::{1}=={2}"
                       .format(stat_name, collect_stats, expected))
         if expected == collect_stats:
             continue
         # memory-related stats legitimately fluctuate between reads
         if ('tcmalloc' in stats[0] or 'bytes' in stats[0]
                 or 'mem_used' in stats[0]):
             self.log.warn("Stat didn't match, but it can be changed, not a bug")
             continue
         raise Exception("Command does not throw out error message "
                         "but cbstats does not match.")
Exemple #24
0
    def get_vbucket_stats(self,
                          bucket_name,
                          stat_name,
                          vbucket_num,
                          field_to_grep=None):
        """
        Fetches failovers stats for specified vbucket
        and greps for specific stat.
        Uses command:
          cbstats localhost:port failovers '[vbucket_num]' | \
            grep '[field_to_grep]'

        Note: Function calling this API should take care of validating
        the outputs and handling the errors/warnings from execution.

        Arguments:
        :bucket_name   - Name of the bucket to get the stats
        :stat_name     - Any valid stat_command accepted by cbstats
        :vbucket_num   - Target vbucket number to fetch the stats
        :field_to_grep - Target stat name string to grep.
                         Default=None, means to fetch all stats related to
                         the selected vbucket stat
                         (NOTE(review): currently unused by this
                         implementation - the full stat map for the vbucket
                         is returned regardless)

        Returns:
        :output - Stat map returned by the memcached stats call.
                  (No separate error buffer is returned, despite what
                  earlier documentation claimed.)
        """
        client = MemcachedClientHelper.direct_client(self.server,
                                                     Bucket(bucket_name), 30,
                                                     self.username,
                                                     self.password)
        output = client.stats("{} {}".format(stat_name, vbucket_num))
        client.close()
        return output
Exemple #25
0
    def test_logical_clock_ticks(self):
        """Verify that moving the system clock backwards forces HLC into
        logical-clock mode: each subsequent mutation should bump the
        per-vbucket logical_clock_ticks counter exactly once.
        """
        self.log.info('starting test_logical_clock_ticks')

        payload = "name={0}&roles=admin&password=password".format(
            self.buckets[0].name)
        self.rest.add_set_builtin_user(self.buckets[0].name, payload)
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.servers[0].ip],
                               bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(
            self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # do a bunch of mutations to set the max cas
        gen_load = BlobGenerator('key-for-cas-test-logical-ticks',
                                 'value-for-cas-test-',
                                 self.value_size,
                                 end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        # baseline: sum logical_clock_ticks across all vbuckets
        vbucket_stats = mc_client.stats('vbucket-details')
        base_total_logical_clock_ticks = 0
        for i in range(self.vbuckets):
            #print vbucket_stats['vb_' + str(i) + ':logical_clock_ticks']
            base_total_logical_clock_ticks = base_total_logical_clock_ticks + int(
                vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'])
        self.log.info('The base total logical clock ticks is {0}'.format(
            base_total_logical_clock_ticks))

        # move the system clock back so the logical counter part of HLC is used and the logical clock ticks
        # stat is incremented
        # NOTE(review): the failure message says 'advance' although the clock
        # is moved BACKWARDS here - confirm the intended wording
        self.assertTrue(
            shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS),
            'Failed to advance the clock')

        # do more mutations
        NUMBER_OF_MUTATIONS = 10000
        gen_load = BlobGenerator('key-for-cas-test-logical-ticks',
                                 'value-for-cas-test-',
                                 self.value_size,
                                 end=NUMBER_OF_MUTATIONS)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        vbucket_stats = mc_client.stats('vbucket-details')
        # NOTE(review): this sleep happens AFTER the stats snapshot above was
        # taken; if it is meant to let the stats settle it should probably
        # precede the stats call - confirm against test intent
        time.sleep(30)
        total_logical_clock_ticks = 0
        for i in range(self.vbuckets):
            total_logical_clock_ticks = total_logical_clock_ticks + int(
                vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'])

        self.log.info('The total logical clock ticks is {0}'.format(
            total_logical_clock_ticks))

        # exactly one tick per mutation is expected while the clock lags
        self.assertTrue(
            total_logical_clock_ticks -
            base_total_logical_clock_ticks == NUMBER_OF_MUTATIONS,
            'Expected clock tick {0} actual {1}'.format(
                NUMBER_OF_MUTATIONS,
                total_logical_clock_ticks - base_total_logical_clock_ticks))
Exemple #26
0
    def test_time_sync_threshold_setting_rest_call(self):
        """Set the CAS drift ahead/behind thresholds via the REST API and
        verify both are reflected (in microseconds, i.e. ms * 1000) in the
        memcached stats.

        The bucket is created with LWW in the base test case (LWW parameter).
        """
        self.log.info("starting test_time_sync_threshold_setting_rest_call")

        client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])

        rest = RestConnection(self.master)
        self.assertTrue(
            rest.set_cas_drift_threshold(self.buckets[0], 100000, 200000), "Unable to set the CAS drift threshold"
        )
        time.sleep(15)  # take a few seconds for the stats to settle in
        stats = client.stats()

        self.assertTrue(
            int(stats["ep_hlc_drift_ahead_threshold_us"]) == 100000 * 1000,
            "Ahead threshold incorrect. Expected {0} actual {1}".format(
                100000 * 1000, stats["ep_hlc_drift_ahead_threshold_us"]
            ),
        )

        # bug fix: this message previously said "Ahead" (copy-paste) while
        # checking the BEHIND threshold
        self.assertTrue(
            int(stats["ep_hlc_drift_behind_threshold_us"]) == 200000 * 1000,
            "Behind threshold incorrect. Expected {0} actual {1}".format(
                200000 * 1000, stats["ep_hlc_drift_behind_threshold_us"]
            ),
        )
    def collect_vbucket_num_stats(self, servers, buckets):
        """
            Method to extract the vbucket-count stats given by cbstats tool

            Parameters:

            buckets: bucket information
            servers: server information

            Returns:

            Two maps of the shape {bucket_name: {node_ip: count}}, for the
            active (vb_active_num) and replica (vb_replica_num) vbucket
            counts respectively.
        """
        active_bucketMap = {}
        replica_bucketMap = {}
        for bucket in buckets:
            active_map_data = {}
            replica_map_data = {}
            for server in servers:
                client = MemcachedClientHelper.direct_client(server, bucket)
                stats = client.stats('')
                # direct dict lookups instead of scanning every stat key
                if 'vb_active_num' in stats:
                    active_map_data[server.ip] = int(stats['vb_active_num'])
                if 'vb_replica_num' in stats:
                    replica_map_data[server.ip] = int(stats['vb_replica_num'])
                client.close()  # avoid leaking one connection per server
            active_bucketMap[bucket.name] = active_map_data
            replica_bucketMap[bucket.name] = replica_map_data
        return active_bucketMap, replica_bucketMap
    def collect_dcp_stats(self, buckets, servers, stat_names=None,
                                extra_key_condition="replication"):
        """
            Method to extract DCP stats given by cbstats tool

            Parameters:

            buckets: bucket information
            servers: server information
            stat_names: stats we are searching to compare

            Returns:

            map of bucket informing map[bucket][vbucket id][stat name]

            example:: unacked_bytes in dcp
        """
        # avoid the shared mutable default argument; None means "no stats"
        if stat_names is None:
            stat_names = []
        bucketMap = {}
        for bucket in buckets:
            dataMap = {}
            for server in servers:
                client = MemcachedClientHelper.direct_client(server, bucket)
                stats = client.stats('dcp')
                client.close()  # avoid leaking one connection per server
                for key in stats.keys():
                    for stat_name in stat_names:
                        if stat_name in key and extra_key_condition in key:
                            value = int(stats[key])
                            tokens = key.split(":")
                            # vbucket id is the trailing '..._<vb>' suffix
                            vb_no = int(tokens[-1].split("_")[1])
                            if vb_no not in dataMap:
                                dataMap[vb_no] = {}
                            dataMap[vb_no][stat_name] = value
            bucketMap[bucket.name] = dataMap
        return bucketMap
    def collect_dcp_stats(self, buckets, servers, stat_names = [], extra_key_condition = "replication"):
        """
            Extract DCP stats via the cbstats tool.

            Builds map[bucket name][vbucket id][stat name] for every DCP stat
            whose key contains both one of *stat_names* and
            *extra_key_condition*.

            example:: unacked_bytes in dcp
        """
        results = {bucket.name: {} for bucket in buckets}
        for bucket in buckets:
            per_vb = {}
            for server in servers:
                dcp_stats = MemcachedClientHelper.direct_client(server, bucket).stats('dcp')
                for stat_key, raw_value in dcp_stats.items():
                    for name in stat_names:
                        if name in stat_key and extra_key_condition in stat_key:
                            # the vbucket id is the trailing '..._<vb>' suffix
                            vb_id = int(stat_key.split(":")[-1].split("_")[1])
                            per_vb.setdefault(vb_id, {})[name] = int(raw_value)
            results[bucket.name] = per_vb
        return results
    def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
        """Backup a bucket containing soon-to-expire keys, recreate the
        bucket, restore the backup, and verify every restored key has
        expired (get returns status 1 / key not found).

        :param replica: replica count used when creating the source bucket
        """
        bucket = "default"
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        # use two thirds of the node memory quota for the bucket
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        expiry = 60
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, key)
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        client.close()
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
        # all items must be persisted before the backup is taken
        ready = RebalanceHelper.wait_for_persistence(self.master, bucket, bucket_type=self.bucket_type)
        self.assertTrue(ready, "not all items persisted. see logs")
        node = RestConnection(self.master).get_nodes_self()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)
        backupHelper = BackupHelper(self.master, self)
        backupHelper.backup(bucket, node, self.remote_tmp_folder)

        # recreate the bucket from scratch, then restore into it
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        backupHelper.restore(self.remote_tmp_folder)
        # 60s sleep guarantees the 60s TTL has elapsed by verification time
        time.sleep(60)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        self.log.info('verifying that all those keys have expired...')
        for key in keys:
            try:
                client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.fail(msg.format(expiry, key, expiry))
            except mc_bin_client.MemcachedError as error:
                # status 1 == key not found, i.e. the key expired as expected
                self.assertEqual(error.status, 1,
                                  msg="expected error code {0} but saw error code {1}".format(1, error.status))
        client.close()
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Exemple #31
0
    def run_test(self):
        """Drive the bucket into DGM (disk-greater-than-memory) state, wait
        for persistence, then hammer the node with concurrent 'stats all'
        and 'stats reset' calls while reader threads fetch data.

        NOTE(review): the outer ``while True`` loop below has no break or
        exit condition, so this test runs until externally aborted -
        confirm this is intentional.
        """
        ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
        active_resident_threshold = int(self.input.param("active_resident_threshold", 10))

        mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
        stats = mc.stats()
        threshold = int(self.input.param('threshold', stats[ep_threshold]))
        threshold_reached = False
        self.num_items = self.input.param("items", 10000)
        self._load_doc_data_all_buckets('create')

        # load items till reached threshold or mem-ratio is less than resident ratio threshold
        while not threshold_reached :
            # NOTE(review): mc.stats() is fetched up to three times per
            # iteration; values can differ between the calls
            mem_used = int(mc.stats()["mem_used"])
            if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
                self.log.info("mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s " % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"]))
                items = self.num_items
                self.num_items += self.input.param("items", 10000)
                self._load_doc_data_all_buckets('create', items)
            else:
                threshold_reached = True
                self.log.info("DGM state achieved!!!!")

        # wait for draining of data before restart and warm up
        for bucket in self.buckets:
            RebalanceHelper.wait_for_persistence(self.nodes_server[0], bucket, bucket_type=self.bucket_type)


        while True:

#            read_data_task = self.cluster.async_verify_data(self.master, self.buckets[0], self.buckets[0].kvs[1])

            # one background reader plus N stats-all / stats-reset pairs
            read_data_task = Thread(target=self._run_get)
            read_data_task.start()
            #5 threads to run stats all and reset asynchronously
            start = time.time()
            while (time.time() - start) < 300:

                stats_all_thread = []
                stats_reset_thread = []

                for i in range(self.threads_to_run):
                    stat_str = ''
                    stats_all_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                    stats_all_thread[i].start()
                    stat_str = 'reset'
                    stats_reset_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                    stats_reset_thread[i].start()


                for i in range(self.threads_to_run):
                    stats_all_thread[i].join()
                    stats_reset_thread[i].join()

                del stats_all_thread
                del stats_reset_thread

#            read_data_task.result()
            read_data_task.join()
Exemple #32
0
    def test_expired_keys_tap(self):
        """Insert keys with a short TTL, wait for them to expire, then
        listen on a TAP stream and verify no CMD_TAP_DELETE messages are
        observed for the expired keys.
        """
        server = self.master
        # callback to track deletes
        queue = Queue(maxsize=10000)
        listener = TapListener(queue, server, "CMD_TAP_DELETE")

        client = MemcachedClientHelper.direct_client(server, self._bucket_name)
        expirations = [15]
        for expiry in expirations:
            testuuid = uuid.uuid4()
            keys = ["key_%s_%d" % (testuuid, i) for i in range(1000)]
            self.log.info("pushing keys with expiry set to {0}".format(expiry))
            for key in keys:
                try:
                    client.set(key, expiry, 0, key)
                except mc_bin_client.MemcachedError as error:
                    msg = "unable to push key : {0} to bucket : {1} error : {2}"
                    self.log.error(
                        msg.format(key, client.vbucketId, error.status))
                    self.fail(msg.format(key, client.vbucketId, error.status))
            self.log.info("inserted {0} keys with expiry set to {1}".format(
                len(keys), expiry))
            # generous slack beyond the TTL for the expiry pager to run
            delay = expiry + 15
            msg = "sleeping for {0} seconds to wait for those items with expiry set to {1} to expire"
            self.log.info(msg.format(delay, expiry))
            time.sleep(delay)
            self.log.info('verifying that all those keys have expired...')
            for key in keys:
                try:
                    client.get(key=key)
                    msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                    self.fail(msg.format(expiry, key, delay))
                except mc_bin_client.MemcachedError as error:
                    # status 1 == key not found; assertEqual replaces the
                    # deprecated assertEquals alias
                    self.assertEqual(
                        error.status,
                        1,
                        msg="expected error code {0} but saw error code {1}".
                        format(1, error.status))
            self.log.info(
                "verified that those keys inserted with expiry set to {0} have expired"
                .format(expiry))
            listener.start()
            try:
                was_empty = 0
                deletes_seen = 0
                while was_empty < 2:
                    try:
                        queue.get(False, 5)
                        deletes_seen += 1
                    except Empty:
                        # Python-2-only print statements replaced with the
                        # test logger used everywhere else in this method
                        self.log.info("exception thrown")
                        self.log.info("how many deletes_seen ? {0}".format(
                            deletes_seen))
                        was_empty += 1
                # message typo fixed: "some some deletes" -> "saw some deletes"
                self.assertEqual(deletes_seen, 0, msg="saw some deletes")
                self.log.info("seen {0} CMD_TAP_DELETE".format(deletes_seen))
            finally:
                listener.aborted = True
Exemple #33
0
    def wait_for_mc_stats_all_nodes(master, bucket, stat_key, stat_value,
                                    timeout_in_seconds=120, verbose=True):
        """Poll a memcached stat, summed across all active cluster nodes,
        until it equals *stat_value* or stops changing for
        *timeout_in_seconds*.

        :param master: cluster master server (its REST credentials are
            reused for every node)
        :param bucket: bucket whose stats are polled
        :param stat_key: name of the memcached stat to aggregate
        :param stat_value: target aggregate value to wait for
        :param timeout_in_seconds: how long the value may stay unchanged
            before giving up (default 120)
        :param verbose: log every poll and sleep 2s between polls
            (0.1s when False)
        :returns: True when the aggregate matched, False on timeout
        """
        log = logging.getLogger("infra")
        log.info("waiting for bucket {0} stat : {1} to match {2} on {3}"
                 .format(bucket, stat_key, stat_value, master.ip))
        time_to_timeout = 0
        previous_stat_value = -1
        curr_stat_value = -1
        verified = False
        all_stats = {}
        while not verified:
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            for node in nodes:
                _server = {"ip": node.ip, "port": node.port,
                           "username": master.rest_username,
                           "password": master.rest_password}
                # Failed over node is part of node_statuses but since
                # its failed over memcached connections to this node will fail
                node_self = RestConnection(_server).get_nodes_self()
                if node_self.clusterMembership == 'active':
                    mc = MemcachedClientHelper.direct_client(_server, bucket)
                    n_stats = mc.stats("")
                    mc.close()
                    all_stats[node.id] = n_stats
            # sum the stat across every node that reported it; -1 marks
            # "no node reported the stat yet"
            actual_stat_value = -1
            for k in all_stats:
                if all_stats[k] and stat_key in all_stats[k]:
                    if actual_stat_value == -1:
                        log.info(all_stats[k][stat_key])
                        actual_stat_value = int(all_stats[k][stat_key])
                    else:
                        actual_stat_value += int(all_stats[k][stat_key])
            if actual_stat_value == stat_value:
                log.info("{0} : {1}".format(stat_key, actual_stat_value))
                verified = True
                break
            else:
                if verbose:
                    log.info("{0} : {1}".format(stat_key, actual_stat_value))
                curr_stat_value = actual_stat_value

                # values are changing so clear any timeout
                if curr_stat_value != previous_stat_value:
                    time_to_timeout = 0
                else:
                    # value is static: arm the deadline on first sight, then
                    # give up (returning False) once it expires
                    if time_to_timeout == 0:
                        time_to_timeout = time.time() + timeout_in_seconds
                    if time_to_timeout < time.time():
                        log.info("no change in {0} stat after {1} seconds (value = {2})".format(stat_key, timeout_in_seconds, curr_stat_value))
                        break

                previous_stat_value = curr_stat_value

                if not verbose:
                    time.sleep(0.1)
                else:
                    time.sleep(2)
        return verified
    def set_get_test(self, value_size, number_of_items):
        """Create buckets with replica counts 0-3, insert fixed-size items
        into each, then read every inserted key back (with up to 10 retry
        passes) and fail if any key cannot be verified.

        :param value_size: size in bytes of the fixed value inserted
        :param number_of_items: soft cap on inserted/rejected item counts
        """
        fixed_value = MemcachedClientHelper.create_value("S", value_size)
        specs = [
            ("default", 0),
            ("set-get-bucket-replica-1", 1),
            ("set-get-bucket-replica-2", 2),
            ("set-get-bucket-replica-3", 3),
        ]
        serverInfo = self.master
        rest = RestConnection(serverInfo)
        bucket_ram = int(rest.get_nodes_self().memoryQuota / 4)

        mcport = rest.get_nodes_self().memcached
        for name, replica in specs:
            rest.create_bucket(name, bucket_ram, "sasl", "password", replica, mcport)

        bucket_data = {}
        buckets = RestConnection(serverInfo).get_buckets()
        for bucket in buckets:
            bucket_data[bucket.name] = {}
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            self.test.assertTrue(ready, "wait_for_memcached failed")

            client = MemcachedClientHelper.direct_client(serverInfo, bucket.name)
            inserted = []
            rejected = []
            while len(inserted) <= number_of_items and len(rejected) <= number_of_items:
                try:
                    key = str(uuid.uuid4())
                    client.set(key, 0, 0, fixed_value)
                    inserted.append(key)
                except mc_bin_client.MemcachedError:
                    pass

            retry = 0
            remaining_items = list(inserted)
            msg = "memcachedError : {0} - unable to get a pre-inserted key : {1}"
            while retry < 10 and len(remaining_items) > 0:
                verified_keys = set()
                for key in remaining_items:
                    try:
                        flag, keyx, value = client.get(key=key)
                        if not value == fixed_value:
                            self.test.fail("value mismatch for key {0}".format(key))
                        verified_keys.add(key)
                    except mc_bin_client.MemcachedError as error:
                        self.log.error(msg.format(error.status, key))
                # bug fix: count one retry per PASS over the keys, not one
                # per key - the original incremented inside the for loop, so
                # the 10-retry budget was exhausted after a handful of keys
                retry += 1
                # drop verified keys (explicit filter instead of a
                # side-effect list comprehension calling list.remove)
                remaining_items = [k for k in remaining_items if k not in verified_keys]

            print_count = 0
            for key in remaining_items:
                if print_count > 100:
                    break
                print_count += 1
                self.log.error("unable to verify key : {0}".format(key))
            if remaining_items:
                self.test.fail("unable to verify {0} keys".format(len(remaining_items)))
Exemple #35
0
    def _get_stats(self, stat_str='all'):
        """Fetch the given stat group from the first server and log the call.

        :param stat_str: stat group passed to memcached ``stats``
            (default 'all'; e.g. '' for all stats, 'reset' to reset them)
        """
        # the leftover commented-out server loop forced a double-indented
        # body and left an unused local; both removed
        server = self.servers[0]
        mc_conn = MemcachedClientHelper.direct_client(server, self.bucket_name, self.timeout)
        mc_conn.stats(stat_str)  # fetched for its side effect; result unused
        self.log.info("Getting stats {0}".format(stat_str))
        mc_conn.close()
Exemple #36
0
    def _test_delete_key_and_backup_and_restore_body(self):
        """Insert keys, delete one, back the bucket up on every server,
        restore, then verify the deleted key is absent and all others exist.
        """
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        client.delete(keys[0])

        # wait until the write queues drain so the backup sees all mutations
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        # bug fix: this message previously said ep_queue_size (copy-paste)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        missing_keys = []
        verify_keys = []
        for key in keys:
            # route the client to the key's vbucket (1024-vbucket cluster)
            vBucketId = crc32.crc32_hash(key) & 1023  # or & 0x3FF
            client.vbucketId = vBucketId
            if key == keys[0]:
                missing_keys.append(key)
            else:
                verify_keys.append(key)

        self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                        "Keys are not empty")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                        "Missing keys")
Exemple #37
0
    def _test_delete_key_and_backup_and_restore_body(self):
        """Insert keys, delete one, back the bucket up on every server,
        restore, then verify the deleted key is absent and all others exist.
        """
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        client.delete(keys[0])

        # wait until the write queues drain so the backup sees all mutations
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        # bug fix: this message previously said ep_queue_size (copy-paste)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        missing_keys = []
        verify_keys = []
        for key in keys:
            # route the client to the key's vbucket (1024-vbucket cluster)
            vBucketId = crc32.crc32_hash(key) & 1023  # or & 0x3FF
            client.vbucketId = vBucketId
            if key == keys[0]:
                missing_keys.append(key)
            else:
                verify_keys.append(key)

        self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                        "Keys are not empty")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                        "Missing keys")
Exemple #38
0
 def _parallel_read(self):
     """Continuously read back every inserted key from all buckets.

     Loops until ``self.reader_shutdown`` is set by the test driver;
     intended to run on a background thread while other operations
     are in flight.
     """
     rest_conn = RestConnection(self.master)
     all_buckets = rest_conn.get_buckets()
     while not self.reader_shutdown:
         for cur_bucket in all_buckets:
             bucket_name = cur_bucket.name.encode("ascii", "ignore")
             reader = MemcachedClientHelper.direct_client(self.master, bucket_name)
             for stored_key in self.bucket_data[bucket_name]["inserted_keys"]:
                 reader.get(stored_key)
Exemple #39
0
    def _get_stats(self, stat_str='all'):
        """Fetch a stats group from the first server's memcached and log it.

        :param stat_str: stats group to request (e.g. 'all', 'timings').
        :return: the raw stats mapping returned by memcached.
        """
        server = self.servers[0]
        mc_conn = MemcachedClientHelper.direct_client(server, self.bucket_name, self.timeout)
        try:
            # BUGFIX: the result used to be collected and then discarded;
            # return it so callers can actually inspect the stats.
            stat_result = mc_conn.stats(stat_str)
            self.log.info("Getting stats {0}".format(stat_str))
        finally:
            # Close the connection even if stats() raises.
            mc_conn.close()
        return stat_result
Exemple #40
0
 def _parallel_read(self):
     # Reader loop: repeatedly fetch every previously inserted key from
     # each bucket until the shutdown flag is raised by the test driver.
     buckets = RestConnection(self.master).get_buckets()
     while not self.reader_shutdown:
         for b in buckets:
             ascii_name = b.name.encode("ascii", "ignore")
             conn = MemcachedClientHelper.direct_client(self.master, ascii_name)
             for inserted_key in self.bucket_data[ascii_name]["inserted_keys"]:
                 conn.get(inserted_key)
Exemple #41
0
    def set_get_test(self, value_size, number_of_items):
        """Create four sasl buckets with replica counts 0-3, load items of
        ``value_size`` bytes into each, then read every key back and verify
        the stored value, retrying unverified keys up to 10 passes.

        :param value_size: size in bytes of the value stored under each key.
        :param number_of_items: cap on inserted (and rejected) items per bucket.
        """
        fixed_value = MemcachedClientHelper.create_value("S", value_size)
        specs = [("default", 0),
                ("set-get-bucket-replica-1", 1),
                ("set-get-bucket-replica-2", 2),
                ("set-get-bucket-replica-3", 3)]
        serverInfo = self.master
        rest = RestConnection(serverInfo)
        # Split the node's memcached quota evenly across the four buckets.
        bucket_ram = int(rest.get_nodes_self().memoryQuota / 4)

        mcport = rest.get_nodes_self().memcached
        for name, replica in specs:
            rest.create_bucket(name, bucket_ram, "sasl", "password", replica, mcport)

        bucket_data = {}
        buckets = RestConnection(serverInfo).get_buckets()
        for bucket in buckets:
            bucket_data[bucket.name] = {}
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            self.test.assertTrue(ready, "wait_for_memcached failed")

            client = MemcachedClientHelper.direct_client(serverInfo, bucket.name)
            inserted = []
            rejected = []
            # Load until either inserted or rejected exceeds the item cap;
            # TMPFAIL-style set errors are silently counted as rejections.
            while len(inserted) <= number_of_items and len(rejected) <= number_of_items:
                try:
                    key = str(uuid.uuid4())
                    client.set(key, 0, 0, fixed_value)
                    inserted.append(key)
                except mc_bin_client.MemcachedError:
                    pass

            retry = 0
            remaining_items = list(inserted)
            msg = "memcachedError : {0} - unable to get a pre-inserted key : {1}"
            while retry < 10 and remaining_items:
                # BUGFIX: retry used to be incremented once per *key* inside
                # the for-loop, which aborted after ~10 key lookups instead of
                # allowing up to 10 full verification passes.
                retry += 1
                verified_keys = []
                for key in remaining_items:
                    try:
                        flag, keyx, value = client.get(key=key)
                        if not value == fixed_value:
                            self.test.fail("value mismatch for key {0}".format(key))
                        verified_keys.append(key)
                    except mc_bin_client.MemcachedError as error:
                        self.log.error(msg.format(error.status, key))
                # Rebuild instead of a side-effect list comprehension.
                remaining_items = [x for x in remaining_items if x not in verified_keys]

            # Log at most ~100 unverified keys, then fail if any remain.
            print_count = 0
            for key in remaining_items:
                if print_count > 100:
                    break
                print_count += 1
                self.log.error("unable to verify key : {0}".format(key))
            if remaining_items:
                self.test.fail("unable to verify {0} keys".format(len(remaining_items)))
Exemple #42
0
 def _verify_direct_client_stats(self, bucket, command, output):
     """Cross-check each 'name:value' line of cbstats ``output`` against the
     stats reported directly by memcached for ``bucket``.

     :param bucket: bucket object whose stats are compared.
     :param command: stats group name passed to memcached's stats call.
     :param output: iterable of 'stat_name:stat_value' lines from cbstats.
     :raises Exception: when any stat value disagrees with the direct stats.
     """
     mc_conn = MemcachedClientHelper.direct_client(self.master, bucket.name, self.timeout)
     try:
         for line in output:
             stats = line.split(":", 1)
             stat_name = stats[0].strip()
             expected = stats[1].strip()
             # BUGFIX: fetch the stats snapshot once per line; the original
             # called mc_conn.stats(command) twice (two round-trips and
             # possibly two *different* snapshots between log and compare).
             actual = mc_conn.stats(command)[stat_name]
             self.log.info("CbStats###### for %s:::%s==%s" % (stats[0], actual, expected))
             if expected != actual:
                 raise Exception("Command does not throw out error message but cbstats does not match.")
     finally:
         # Original leaked the connection on every path; always close it.
         mc_conn.close()
Exemple #43
0
    def setUp(self):
        """Prepare an eviction/DCP test: open a DCP producer stream against
        the master node's 'default' bucket and a direct memcached client."""
        super(EvictionDCP, self).setUp()
        # DCP connection straight to the master's memcached port (11210).
        self.dcp_client = DcpClient(self.master.ip, int(11210))
        self.dcp_client.sasl_auth_plain(self.master.rest_username, self.master.rest_password)
        self.dcp_client.bucket_select('default')
        # delete_times=True — presumably requests delete-time metadata on the
        # producer stream; TODO confirm against the DcpClient implementation.
        self.dcp_client.open_producer(name='eviction', delete_times=True)

        self.rest = RestConnection(self.servers[0])
        self.client = MemcachedClientHelper.direct_client(self.master, 'default')
    def collect_vbucket_stats(self,
                              buckets,
                              servers,
                              collect_vbucket=True,
                              collect_vbucket_seqno=True,
                              collect_vbucket_details=True,
                              perNode=True):
        """
            Method to extract the vbuckets stats given by cbstats tool

            Parameters:

            buckets: bucket information
            servers: server information
            collect_vbucket: take vbucket type stats
            collect_vbucket_seqno: take vbucket-seqno type stats
            collect_vbucket_details: take vbucket-details type stats
            perNode: if True collects data per node else takes a union across nodes

            Returns:

            The output can be in two formats

            if we are doing per node data collection
            Vbucket Information :: {bucket { node : [vbucket_seqno {key:value}
                         U vbucket_details {key:value} U vbucket {key:value}]}}

            if we are not doing per node data collection
            Vbucket Information :: {bucket : [vbucket_seqno {key:value}
                          U vbucket_details {key:value} U vbucket {key:value}]}
        """
        # Cleanup vs original: dropped the redundant pre-initialization loop
        # (every bucketMap entry was immediately overwritten below) and the
        # unused placeholder lists; the client is now always closed.
        bucketMap = {}
        for bucket in buckets:
            dataMap = {}
            for server in servers:
                map_data = {}
                client = MemcachedClientHelper.direct_client(server, bucket)
                try:
                    if collect_vbucket:
                        self.createMapVbucket(client.stats('vbucket'), map_data)
                    if collect_vbucket_seqno:
                        self.createMapVbucket(client.stats('vbucket-seqno'), map_data)
                    if collect_vbucket_details:
                        self.createMapVbucket(client.stats('vbucket-details'), map_data)
                finally:
                    # Original leaked one memcached connection per server.
                    client.close()
                if perNode:
                    dataMap[server.ip] = map_data
                else:
                    # Union across nodes: later servers overwrite equal keys.
                    dataMap.update(map_data)
            bucketMap[bucket.name] = dataMap
        return bucketMap
Exemple #45
0
    def _warmup(self):
        """Wait for every server to finish ep-engine warmup.

        For each server: first poll until an 'ep_warmup_time' stat is
        available (5 minutes for the first server, 1 minute for the rest),
        then poll 'ep_warmup_thread' until it reports 'complete' and the
        item count matches the pre-warmup snapshot.

        :return: True if the *last* server processed reached the warmed-up
                 state within ``self.timeout``, else False.
        """
        warmed_up = False
        for server in self.servers:
            mc = MemcachedClientHelper.direct_client(server, self.bucket_name)
            start = time.time()
            if server == self.servers[0]:
                wait_time = 300
            else:
                wait_time = 60
                # Try to get the stats for 5 minutes, else hit out.
            while time.time() - start < wait_time:
                # Get the warmup time for each server
                try:
                    stats = mc.stats()
                    if stats is not None:
                        warmup_time = int(stats["ep_warmup_time"])
                        self.log.info("ep_warmup_time is %s " % warmup_time)
                        self.log.info(
                            "Collected the stats %s for server %s:%s" % (stats["ep_warmup_time"], server.ip,
                                server.port))
                        break
                    else:
                        self.log.info(" Did not get the stats from the server yet, trying again.....")
                        time.sleep(2)
                except Exception as e:
                    self.log.error(
                        "Could not get warmup_time stats from server %s:%s, exception %s" % (server.ip,
                            server.port, e))
            else:
                # while/else: runs only if the loop above timed out without
                # ever break-ing, i.e. no warmup stats were obtained.
                self.fail(
                    "Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds." % (
                        server.ip, server.port, wait_time))

            # Waiting for warm-up
            start = time.time()
            warmed_up = False
            while time.time() - start < self.timeout and not warmed_up:
                if mc.stats()["ep_warmup_thread"] == "complete":
                    self.log.info("warmup completed, awesome!!! Warmed up. %s items " % (mc.stats()["curr_items_tot"]))
                    time.sleep(5)
                    # Only count as warmed up once the item total matches the
                    # snapshot taken before warmup started.
                    if mc.stats()["curr_items_tot"] == self.pre_warmup_stats["%s:%s" % (server.ip, server.port)]["curr_items_tot"]:
                        self._stats_report(server, mc.stats(self.stat_str))
                        warmed_up = True
                    else:
                        continue
                elif mc.stats()["ep_warmup_thread"] == "running":
                    self.log.info(
                                "still warming up .... curr_items_tot : %s" % (mc.stats()["curr_items_tot"]))
                else:
                    self.fail("Value of ep warmup thread does not exist, exiting from this server")
                time.sleep(5)
            mc.close()
        # NOTE(review): warmed_up is reset per server, so the return value
        # reflects only the last server in self.servers — confirm intended.
        if warmed_up:
            return True
        else:
            return False
Exemple #46
0
    def wait_for_mc_stats_all_nodes(master, bucket, stat_key, stat_value, timeout_in_seconds=120, verbose=True):
        """Poll until the cluster-wide sum of ``stat_key`` over all active
        nodes equals ``stat_value``.

        The stat is summed across every node whose clusterMembership is
        'active' (failed-over nodes are skipped since their memcached
        connections would fail). If the summed value stops changing for
        ``timeout_in_seconds``, give up.

        Note: Python 2 code (uses a print statement).

        :param master: server whose REST interface lists cluster nodes.
        :param bucket: bucket name whose stats are queried.
        :param stat_key: stat name to sum across nodes.
        :param stat_value: target value for the summed stat.
        :param timeout_in_seconds: max time to wait after the value plateaus.
        :param verbose: log every poll and sleep 2s instead of 0.1s.
        :return: True if the target value was reached, else False.
        """
        log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, \
                                                                                stat_value, master.ip))
        time_to_timeout = 0
        previous_stat_value = -1
        curr_stat_value = -1
        verified = False
        all_stats = {}
        while not verified:
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            for node in nodes:
                _server = {"ip": node.ip, "port": node.port, "username": master.rest_username,
                           "password": master.rest_password}
                #failed over node is part of node_statuses but since its failed over memcached connections
                #to this node will fail
                node_self = RestConnection(_server).get_nodes_self()
                if node_self.clusterMembership == 'active':
                    mc = MemcachedClientHelper.direct_client(_server, bucket)
                    n_stats = mc.stats("")
                    mc.close()
                    all_stats[node.id] = n_stats
            # Sum the requested stat across every node that reported it.
            actual_stat_value = -1
            for k in all_stats:
                if all_stats[k] and stat_key in all_stats[k]:
                    if actual_stat_value == -1:
                        print all_stats[k][stat_key]
                        actual_stat_value = int(all_stats[k][stat_key])
                    else:
                        actual_stat_value += int(all_stats[k][stat_key])
            if actual_stat_value == stat_value:
                log.info("{0} : {1}".format(stat_key, actual_stat_value))
                verified = True
                break
            else:
                if verbose:
                    log.info("{0} : {1}".format(stat_key, actual_stat_value))
                curr_stat_value = actual_stat_value

                # values are changing so clear any timeout
                if curr_stat_value != previous_stat_value:
                    time_to_timeout = 0
                else:
                    # Value plateaued: arm (or check) the timeout deadline.
                    if time_to_timeout == 0:
                        time_to_timeout = time.time() + timeout_in_seconds
                    if time_to_timeout < time.time():
                        log.info("no change in {0} stat after {1} seconds (value = {2})".format(stat_key, timeout_in_seconds, curr_stat_value))
                        break

                previous_stat_value = curr_stat_value

                if not verbose:
                    time.sleep(0.1)
                else:
                    time.sleep(2)
        return verified
 def test_MB_12751(self):
     # Regression test for MB-12751: replacing a document while it is
     # locked (getl) must fail with EEXISTS rather than succeed.
     client = MemcachedClientHelper.direct_client(self.master, "default")
     client.set("hello", 0, 0, "world")
     client.getl("hello", 15)
     try:
         client.replace("hello", 0, 0, "hello")
     except MemcachedError as err:
         if err.status != memcacheConstants.ERR_EXISTS:
             self.fail("Expected replace to return EEXISTS, returned: {0}".format(err.status))
     else:
         self.fail("The document should be locked")
 def test_MB_12751(self):
     """Regression test for MB-12751: a replace() on a getl-locked document
     must be rejected with EEXISTS."""
     mc = MemcachedClientHelper.direct_client(self.master, "default")
     mc.set("hello", 0, 0, "world")
     # Lock the document for 15 seconds.
     mc.getl("hello", 15)
     try:
         ret = mc.replace("hello", 0, 0, "hello")
         self.fail("The document should be locked")
     # FIX: modernized py2-only 'except MemcachedError, e' to 'as e' —
     # matches the sibling copy of this test and is Python 3 compatible.
     except MemcachedError as e:
         if e.status != memcacheConstants.ERR_EXISTS:
             self.fail("Expected replace to return EEXISTS, returned: {0}".format(e.status))
Exemple #49
0
    def _wait_warmup_completed(self, servers, bucket_name, wait_time=300):
        """Wait for ep-engine warmup to complete on every server.

        For each server: poll until 'ep_warmup_time' is reported, then poll
        'ep_warmup_thread' until it is 'complete' (or ``wait_time`` elapses).

        :param servers: servers to check.
        :param bucket_name: bucket whose warmup status is polled.
        :param wait_time: per-phase timeout in seconds.
        :return: True if the last server processed finished warmup in time.
        """
        warmed_up = False
        log = logger.Logger.get_logger()
        for server in servers:
            mc = None
            start = time.time()
            # Try to get the stats for 5 minutes, else hit out.
            while time.time() - start < wait_time:
                # Get the warmup time for each server
                try:
                    mc = MemcachedClientHelper.direct_client(
                        server, bucket_name)
                    stats = mc.stats()
                    if stats is not None:
                        warmup_time = int(stats["ep_warmup_time"])
                        log.info("ep_warmup_time is %s " % warmup_time)
                        log.info(
                            "Collected the stats 'ep_warmup_time' %s for server %s:%s"
                            %
                            (stats["ep_warmup_time"], server.ip, server.port))
                        break
                    else:
                        log.info(
                            " Did not get the stats from the server yet, trying again....."
                        )
                        time.sleep(2)
                except Exception as e:
                    log.error(
                        "Could not get ep_warmup_time stats from server %s:%s, exception %s"
                        % (server.ip, server.port, e))
            else:
                # while/else: only reached when the poll loop timed out
                # without ever obtaining warmup stats.
                self.fail(
                    "Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds."
                    % (server.ip, server.port, wait_time))

            # Waiting for warm-up
            start = time.time()
            warmed_up = False
            while time.time() - start < wait_time and not warmed_up:
                if mc.stats()["ep_warmup_thread"] == "complete":
                    log.info(
                        "warmup completed, awesome!!! Warmed up. %s items " %
                        (mc.stats()["curr_items_tot"]))
                    warmed_up = True
                    continue
                elif mc.stats()["ep_warmup_thread"] == "running":
                    log.info("still warming up .... curr_items_tot : %s" %
                             (mc.stats()["curr_items_tot"]))
                else:
                    # BUGFIX: was a bare fail(...) — a NameError at runtime;
                    # it must call the TestCase method self.fail(...).
                    self.fail(
                        "Value of ep warmup thread does not exist, exiting from this server"
                    )
                time.sleep(5)
            mc.close()
        return warmed_up
Exemple #50
0
 def test_warmup(self):
     """Drive the bucket into DGM, optionally force an access-log run,
     restart memcached, and verify the cluster warms up, then applies
     further mutations.

     Tunables (via test input params): ep_threshold, items,
     active_resident_threshold, access_log_time, threshold.
     """
     ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
     active_resident_threshold = int(
         self.input.param("active_resident_threshold", 110))
     access_log_time = self.input.param("access_log_time", 2)
     mc = MemcachedClientHelper.direct_client(self.servers[0],
                                              self.bucket_name)
     stats = mc.stats()
     # Memory threshold defaults to the current value of the chosen stat.
     threshold = int(self.input.param('threshold', stats[ep_threshold]))
     threshold_reached = False
     self.num_items = self.input.param("items", 10000)
     self._load_doc_data_all_buckets('create')
     #load items till reached threshold or mem-ratio is less than resident ratio threshold
     while not threshold_reached:
         mem_used = int(mc.stats()["mem_used"])
         if mem_used < threshold or int(mc.stats(
         )["vb_active_perc_mem_resident"]) >= active_resident_threshold:
             self.log.info(
                 "mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s "
                 % (mem_used, threshold,
                    mc.stats()["vb_active_perc_mem_resident"]))
             # Not there yet: load another batch and re-check.
             items = self.num_items
             self.num_items += self.input.param("items", 10000)
             self._load_doc_data_all_buckets('create', items)
         else:
             threshold_reached = True
             self.log.info("DGM state achieved!!!!")
     #parallel load of data
     items = self.num_items
     self.num_items += 10000
     tasks = self._async_load_doc_data_all_buckets('create', items)
     #wait for draining of data before restart and warm up
     rest = RestConnection(self.servers[0])
     self.servers = rest.get_nodes()
     self._wait_for_stats_all_buckets(self.servers)
     # Snapshot stats so _warmup() can compare item counts afterwards.
     self._stats_befor_warmup()
     for task in tasks:
         task.result()
     #If warmup is done through access log then run access scanner
     if self.access_log:
         scanner_runs = int(mc.stats()["ep_num_access_scanner_runs"])
         self.log.info("setting access scanner time %s minutes" %
                       access_log_time)
         self.log.info("current access scanner run is %s" % scanner_runs)
         ClusterOperationHelper.flushctl_set(self.servers[0],
                                             "alog_sleep_time",
                                             access_log_time,
                                             self.bucket_name)
         if not self._wait_for_access_run(access_log_time, scanner_runs,
                                          mc):
             self.fail("Not able to create access log within %s" %
                       access_log_time)
     self._restart_memcache()
     # After a successful warmup, apply updates to the remaining delta.
     if self._warmup():
         self._load_doc_data_all_buckets('update', self.num_items - items)
Exemple #51
0
    def _test_backup_and_restore_bucket_overwriting_body(self, overwrite_flag=True):
        """Back up a bucket, mutate every key, restore, and verify whether
        the restore overwrote the newer values.

        :param overwrite_flag: passed to restore; when True the restored
               (older) values are expected to win, otherwise the post-backup
               values should remain.
        """
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        # Seed 500 keys with value "1"; any set failure aborts the test.
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        # Wait for the disk-write queues to drain before taking the backup.
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        # Take a backup on every server into the remote tmp folder.
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)

            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        # Mutate every key to "2" so restore semantics can be observed.
        for key in keys:
            try:
                client.replace(key, expiry, 0, "2")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to replace key : {0} in bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("replaced {0} keys with expiry set to {1}".format(len(keys), expiry))

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, overwrite_flag)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        # NOTE(review): elsewhere in this file client.get() is unpacked as
        # (flag, key, value); comparing its raw return to "2" may compare a
        # tuple against a string — confirm mc_bin_client's get() contract.
        for key in keys:
            if overwrite_flag:
                self.assertEqual("2", client.get(key=key), key + " should has value = 2")
            else:
                self.assertNotEqual("2", client.get(key=key), key + " should not has value = 2")
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Exemple #52
0
    def _test_backup_and_restore_bucket_overwriting_body(self, overwrite_flag=True):
        """Exercise backup -> mutate -> restore on the default bucket and
        check whether restore overwrites newer data.

        :param overwrite_flag: forwarded to BackupHelper.restore; True means
               restored values are expected to replace the newer ones.
        """
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        # Initial load: 500 keys, all with value "1".
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        # Ensure everything is persisted before the backup snapshot.
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        # Back up the bucket from every server.
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)

            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        # Overwrite every key with "2" after the backup was taken.
        for key in keys:
            try:
                client.replace(key, expiry, 0, "2")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to replace key : {0} in bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("replaced {0} keys with expiry set to {1}".format(len(keys), expiry))

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, overwrite_flag)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        # NOTE(review): client.get() is unpacked as a 3-tuple elsewhere in
        # this file; comparing its return value to "2" directly may never
        # match — verify mc_bin_client's get() return shape.
        for key in keys:
            if overwrite_flag:
                self.assertEqual("2", client.get(key=key), key + " should has value = 2")
            else:
                self.assertNotEqual("2", client.get(key=key), key + " should not has value = 2")
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
    def init_clients(self):
        """Initialise clients for all servers there are vBuckets on

        It returns a dict with 'ip:port' as key (this information is also
        stored this way in every vBucket in the `master` property) and
        the MemcachedClient as the value
        """
        clients = {}
        for vbucket in self.bucket.vbuckets:
            if vbucket.master not in clients:
                ip, port = vbucket.master.split(':')
                # Derive the per-node port from the last two digits of the
                # vbucket master's port plus the base port. NOTE(review):
                # '/' is integer division only on Python 2; on Python 3 this
                # produces a float inside str() — confirm target runtime.
                sport = str((int(port[-2:])) / 2 + int(self.master.port))
                # NOTE(review): str() of a number is always non-empty, so
                # this condition is always truthy and the else branch below
                # is dead code — confirm the intended check.
                if sport:
                    clients[vbucket.master] = \
                        MemcachedClientHelper.direct_client(self._get_server(sport),\
                                                            self.default_bucket_name)
                else:
                    clients[vbucket.master] = MemcachedClientHelper.direct_client(self.master,\
                                                                     self.default_bucket_name)
        return clients
    def test_partial_rollback_via_memcached_restart_and_persistance_stopped(self):
        """Repeatedly trigger KV partial rollbacks (stop persistence, load
        docs, kill memcached) and verify the CBAS dataset rolls back to the
        persisted item count each time; finally verify a second CBAS bucket
        can still connect after the accumulated failover log entries.
        """
        self.log.info("Load data in the default bucket")
        self.perform_doc_ops_in_all_cb_buckets("create", 0, self.num_items, exp=0)
        self.bucket_util.verify_stats_all_buckets(self.num_items)

        self.log.info("Create connection")
        self.cbas_util.createConn(self.cb_bucket_name)

        self.log.info("Create additional CBAS bucket and connect after failover logs are generated")
        secondary_cbas_bucket_name = self.cbas_bucket_name + "_secondary"
        secondary_dataset = self.cbas_dataset_name + "_secondary"

        self.log.info("Create dataset on the CBAS bucket")
        self.cbas_util.create_dataset_on_bucket(cbas_bucket_name=self.cb_bucket_name,
                                                cbas_dataset_name=self.cbas_dataset_name)

        self.log.info("Create dataset on the CBAS secondary bucket")
        self.cbas_util.create_dataset_on_bucket(cbas_bucket_name=self.cb_bucket_name,
                                                cbas_dataset_name=secondary_dataset)

        self.log.info("Connect to Bucket")
        self.cbas_util.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name,
                                         cb_bucket_password=self.cb_bucket_password)

        self.log.info("Validate count on CBAS")
        self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items)

        self.log.info("Establish remote shell to master node")
        shell = RemoteMachineShellConnection(self.cluster.master)

        number_of_times_memcached_restart = self.input.param("number_of_times_memcached_restart", 16)
        for i in range(number_of_times_memcached_restart):

            self.log.info("Stop persistance on KV node")
            mem_client = MemcachedClientHelper.direct_client(self.cluster.master,
                                                         self.cb_bucket_name)
            mem_client.stop_persistence()

            # NOTE(review): self.num_items / 2 is float division on Python 3;
            # confirm the doc-op helper tolerates a float end index.
            self.log.info("Add documents with persistance stopped")
            self.perform_doc_ops_in_all_cb_buckets("create", self.num_items, self.num_items + (self.num_items / 2), exp=0)

            self.log.info("Validate count on CBAS")
            self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items+ (self.num_items / 2))

            # Killing memcached discards the unpersisted mutations, forcing
            # a rollback that CBAS must follow.
            self.log.info("Kill memcached on KV node %s" %str(i))
            shell.kill_memcached()
            self.sleep(2, "Wait for for DCP rollback sent to CBAS and memcached restart")

            self.log.info("Validate count on CBAS")
            self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.num_items), msg="Count mismatch")

        self.log.info("Verify connect to second CBAS Bucket succeeds post long failure logs")
        self.assertTrue(self.cbas_util.connect_to_bucket(cbas_bucket_name=secondary_cbas_bucket_name,
                                         cb_bucket_password=self.cb_bucket_password), msg="Failed to connect CBAS bucket after long failover logs")
Exemple #55
0
    def membase_stats(self,nodes, bucket, frequency, verbose=False):
        """Periodically collect memcached stats (plus 'timings' and
        'dispatcher' groups) from every node until the task is aborted, then
        flush the collected snapshots into self._task.

        Note: Python 2 code (print statement at the end). The ``bucket``
        parameter is shadowed below — each node's first bucket is used.
        ``verbose`` is currently unused.

        :param nodes: nodes to sample stats from.
        :param bucket: nominal bucket name (overwritten per node).
        :param frequency: seconds between samples.
        """
        mcs = []
        for node in nodes:
            try:
                bucket = RestConnection(node).get_buckets()[0].name
                mcs.append(MemcachedClientHelper.direct_client(node, bucket))
            except:
                pass
        self._task["membasestats"] = []
        self._task["timings"] = []
        self._task["dispatcher"] = []
        d = {}
        #        "pname":"x","pid":"y","snapshots":[{"time":time,"value":value}]
        for mc in mcs:
            d[mc.host] = {"snapshots": [], "timings":[], "dispatcher":[]}

        while not self._aborted():
            time_left = frequency
            # at minimum we want to check for aborted every minute
            while not self._aborted() and time_left > 0:
                time.sleep(min(time_left, 60))
                time_left -= 60
            for mc in mcs:
                stats = mc.stats()
                stats["time"] = time.time()
                stats["ip"] = mc.host
                d[mc.host]["snapshots"].append(stats)
                timings = mc.stats('timings')
                d[mc.host]["timings"].append(timings)
                dispatcher = mc.stats('dispatcher')
                d[mc.host]["dispatcher"].append(dispatcher)

        # After abort: tag every collected snapshot with a per-node unique id
        # and move it into the shared task dict.
        start_time = str(self._task["time"])
        for mc in mcs:
            ip = mc.host
            unique_id = ip+'-'+start_time
            current_time = time.time()
            for snapshot in d[mc.host]["snapshots"]:
                snapshot['unique_id'] = unique_id
                snapshot['time'] = current_time
                snapshot['ip'] = ip
                self._task["membasestats"].append(snapshot)
            for timing in d[mc.host]["timings"]:
                timing['unique_id'] = unique_id
                timing['time'] = current_time
                timing['ip'] = ip
                self._task["timings"].append(timing)
            for dispatcher in d[mc.host]["dispatcher"]:
                dispatcher['unique_id'] = unique_id
                dispatcher['time'] = current_time
                dispatcher['ip'] = ip
                self._task["dispatcher"].append(dispatcher)

        print " finished membase_stats"
Exemple #56
0
    def test_rollback_and_persistence_race_condition(self):
        """Load data, stop persistence on every node, load more (unpersisted)
        data, kill memcached on the first node, and verify both nodes
        converge to the same item count after the resulting rollback.
        """
        nodeA = self.servers[0]
        # NOTE(review): vbucket_client and the vb_info results below are
        # never used afterwards — possibly leftover scaffolding; confirm
        # before removing (vb_info may have wanted side effects).
        vbucket_client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        gen_create = BlobGenerator('dcp', 'dcp-', 64, start=0, end=self.num_items)
        self._load_all_buckets(nodeA, gen_create, "create", 0)

        # stop persistence
        for bucket in self.buckets:
            for s in self.servers:
                client = MemcachedClientHelper.direct_client(s, bucket)
                client.stop_persistence()

        vb_uuid, seqno, high_seqno = self.vb_info(self.servers[0], 5)



        time.sleep(10)

        # more (non-intersecting) load
        gen_create = BlobGenerator('dcp-secondgroup', 'dcpsecondgroup-', 64, start=0, end=self.num_items)
        self._load_all_buckets(nodeA, gen_create, "create", 0)


        # Killing memcached drops the unpersisted second batch, forcing a
        # rollback on the replicas.
        shell = RemoteMachineShellConnection(self.servers[0])
        shell.kill_memcached()


        time.sleep(10)



        mc1 = MemcachedClientHelper.direct_client(self.servers[0], "default")
        mc2 = MemcachedClientHelper.direct_client(self.servers[1], "default")

        node1_items = mc1.stats()["curr_items_tot"]
        node2_items = mc2.stats()["curr_items_tot"]

        self.assertTrue( node1_items == node2_items,
                         'Node items not equal. Node 1:{0}, node 2:{1}'.format(node1_items, node2_items ))
        
Exemple #57
0
    def test_rollback_and_persistence_race_condition(self):
        """Race persistence against rollback.

        Steps: load docs, stop persistence on the first nodes_init servers,
        load a second non-intersecting key set, kill memcached on node 0,
        then assert both nodes report the same total item count.

        For ephemeral buckets there is no flusher, so stop_persistence is
        expected to fail with a specific memcached error; in that case the
        test validates the error text and exits early.
        """
        nodeA = self.servers[0]
        vbucket_client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        gen_create = BlobGenerator('dcp', 'dcp-', 64, start=0, end=self.num_items)
        self._load_all_buckets(nodeA, gen_create, "create", 0)

        # stop persistence
        for bucket in self.buckets:
            for s in self.servers[:self.nodes_init]:
                client = MemcachedClientHelper.direct_client(s, bucket)
                try:
                    client.stop_persistence()
                except MemcachedError as e:
                    if self.bucket_type == 'ephemeral':
                        # Ephemeral buckets have no flusher: verify the exact
                        # error text and stop here (rest of test is N/A).
                        self.assertTrue("Memcached error #4 'Invalid':  Flusher not running. for vbucket :0 to mc " in e.message)
                        return
                    else:
                        raise

        # Snapshot vbucket 5 failover info (uuid / seqnos) before second load.
        vb_uuid, seqno, high_seqno = self.vb_info(self.servers[0], 5)

        time.sleep(10)

        # more (non-intersecting) load
        gen_create = BlobGenerator('dcp-secondgroup', 'dcpsecondgroup-', 64, start=0, end=self.num_items)
        self._load_all_buckets(nodeA, gen_create, "create", 0)

        # Crash memcached on node 0 to trigger restart/rollback.
        shell = RemoteMachineShellConnection(self.servers[0])
        shell.kill_memcached()

        time.sleep(10)

        mc1 = MemcachedClientHelper.direct_client(self.servers[0], "default")
        mc2 = MemcachedClientHelper.direct_client(self.servers[1], "default")

        # After recovery both nodes must agree on the total item count.
        node1_items = mc1.stats()["curr_items_tot"]
        node2_items = mc2.stats()["curr_items_tot"]

        self.assertTrue(node1_items == node2_items,
                        'Node items not equal. Node 1:{0}, node 2:{1}'.format(node1_items, node2_items))
    def collect_failovers_stats(self, buckets, servers, perNode=True):
        """
            Method to extract the failovers stats given by cbstats tool

            Parameters:

            buckets: bucket information
            servers: server information
            perNode: if set collect per node information else all

            Returns:

            Failover stats as follows:
            if not collecting per node :: {bucket : [{key:value}]}
            if collecting per node :: {bucket : {node:[{key:value}]}}
        """
        bucketMap = {}
        for bucket in buckets:
            dataMap = {}
            for server in servers:
                client = MemcachedClientHelper.direct_client(server, bucket)
                stats = client.stats('failovers')
                map_data = {}
                num_map = {}
                for stat_key in stats:
                    # Keys look like "vb_<id>:<field>" (2 tokens) or
                    # "vb_<id>:<n>:<field>" (3 tokens, n = failover entry no.)
                    tokens = stat_key.split(":")
                    vb = tokens[0]
                    key = tokens[1]
                    value = stats[stat_key].split()
                    num = -1
                    if len(tokens) == 3:
                        num = int(tokens[1])
                        key = tokens[2]
                    if vb in map_data and num >= num_map[vb]:
                        # Keep the value from the highest-numbered failover
                        # entry seen so far for this vbucket.
                        map_data[vb][key] = value[0]
                        num_map[vb] = num
                    elif vb in map_data and key == "num_entries":
                        # num_entries carries no entry number (num == -1), so
                        # record it unconditionally.
                        map_data[vb][key] = value[0]
                    elif vb not in map_data:
                        map_data[vb] = {key: value[0]}
                        num_map[vb] = num
                if perNode:
                    dataMap[server.ip] = map_data
                else:
                    dataMap.update(map_data)
            bucketMap[bucket.name] = dataMap
        return bucketMap
Exemple #59
0
 def get_checkpoints_from_cluster(self, master, bucket):
     """Collect and merge per-node "checkpoint" stats for a bucket.

     Parameters:
     master: node used to discover cluster membership via REST
     bucket: bucket whose checkpoint stats are collected

     Returns:
     Merged checkpoint stats dict across all cluster nodes.
     """
     parser = CheckpointStatParser()
     rest = RestConnection(master)
     servers = rest.get_nodes()
     merged = {}
     for server in servers:
         mc = MemcachedClientHelper.direct_client(server, bucket)
         try:
             per_node_checkpoint = mc.stats("checkpoint")
             parsed = parser.parse_output(per_node_checkpoint, server.id)
             merged = parser.merge_results(merged, parsed)
         finally:
             # Always release the memcached connection, even if stats
             # retrieval or parsing raises (original leaked the socket).
             mc.close()
     return merged