Example #1
0
    def corrupt_cas_is_healed_on_reboot(self):
        """Corrupt the max CAS on the node holding a key's active vbucket,
        restart the server, and verify the node comes back with max CAS
        reset to 0 (healed).
        """
        self.log.info('Start corrupt_cas_is_healed_on_reboot')

        KEY_NAME = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')

        # Set a key so its vbucket has a mutation to corrupt.
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
                                       json.dumps({'value': 'value1'}))

        # Connection to the node holding the active vbucket for the key.
        mc_active = client.memcached(KEY_NAME)

        # Set the CAS to -2 and then mutate to increment to -1 and then it
        # should stop there.
        self._corrupt_max_cas(mc_active, KEY_NAME)

        # Restart the server; warmup should heal the corrupted max CAS.
        remote = RemoteMachineShellConnection(self.master)
        remote.stop_server()
        time.sleep(30)
        remote.start_server()
        time.sleep(30)

        # Re-resolve the vbucket map after the restart (reuse the existing
        # REST connection -- no need to create a second one).
        client = VBucketAwareMemcached(rest, 'default')
        mc_active = client.memcached(KEY_NAME)

        maxCas = mc_active.getMeta(KEY_NAME)[4]
        self.assertTrue(maxCas == 0,
                        'max cas after reboot is not 0 it is {0}'.format(maxCas))
Example #2
0
    def corrupt_cas_is_healed_on_reboot(self):
        """After corrupting max CAS on the active node, a server restart
        must leave the key's CAS equal to the corrupted value.
        """
        self.log.info('Start corrupt_cas_is_healed_on_reboot')

        KEY_NAME = 'key1'

        rest = RestConnection(self.master)

        # Store a document so the key exists on some vbucket.
        client = VBucketAwareMemcached(rest, 'default')
        doc = json.dumps({'value': 'value1'})
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, doc)

        # Locate the node owning the key's active vbucket.
        mc_active = client.memcached(KEY_NAME)

        # Push CAS to -2; the next mutation bumps it to -1 and it stays there.
        self._corrupt_max_cas(mc_active, KEY_NAME)
        corrupt_cas = mc_active.getMeta(KEY_NAME)[4]

        # Bounce the server.
        remote = RemoteMachineShellConnection(self.master)
        remote.stop_server()
        time.sleep(30)
        remote.start_server()
        time.sleep(30)

        # Re-resolve the vbucket map and re-read the CAS after restart.
        client = VBucketAwareMemcached(rest, 'default')
        mc_active = client.memcached(KEY_NAME)
        curr_cas = mc_active.getMeta(KEY_NAME)[4]

        self.assertTrue(
            curr_cas == corrupt_cas,
            'Corrupted cas (%s) != curr_cas (%s)' % (corrupt_cas, curr_cas))
Example #3
0
 def do_verification(kv_store, rest, bucket):
     """Compare every key recorded in kv_store against the live bucket.

     Returns a dict mapping each mismatching key to a one-element list
     naming the failure ("deleted key", "expired key", "value mismatch",
     or "key not found").
     """
     keys = kv_store.keys()
     smart = VBucketAwareMemcached(rest, bucket)
     validation_failures = {}
     for k in keys:
         expected = kv_store.read(k)
         if not expected:
             continue
         status = expected["status"]
         if status == "deleted" or status == "expired":
             # The key must be gone; a successful get is a failure.
             try:
                 smart.memcached(k).get(k)
                 if status == "deleted":
                     validation_failures[k] = ["deleted key"]
                 else:
                     validation_failures[k] = ["expired key"]
             except MemcachedError:
                 pass
         else:
             # The key must exist and its md5 must match the recorded one.
             try:
                 x, y, value = smart.memcached(k).get(k)
                 actualmd5 = hashlib.md5(value).digest()
                 if actualmd5 != expected["value"]:
                     validation_failures[k] = ["value mismatch"]
             except MemcachedError:
                 validation_failures[k] = ["key not found"]
     return validation_failures
 def do_verification(kv_store, rest, bucket):
     """Compare every key recorded in kv_store against the live bucket.

     Returns a dict mapping each mismatching key to a one-element list
     naming the failure ("deleted key", "expired key", "value mismatch",
     or "key not found").
     """
     keys = kv_store.keys()
     smart = VBucketAwareMemcached(rest, bucket)
     validation_failures = {}
     for k in keys:
         expected = kv_store.read(k)
         if expected:
             if expected["status"] == "deleted":
                 # A deleted key must not be readable.
                 try:
                     smart.memcached(k).get(k)
                     validation_failures[k] = ["deleted key"]
                 except MemcachedError:
                     pass
             elif expected["status"] == "expired":
                 # An expired key must not be readable either.
                 try:
                     smart.memcached(k).get(k)
                     validation_failures[k] = ["expired key"]
                 except MemcachedError:
                     pass
             else:
                 # A live key must exist and match the recorded md5 digest.
                 try:
                     x, y, value = smart.memcached(k).get(k)
                     actualmd5 = hashlib.md5(value).digest()
                     if actualmd5 != expected["value"]:
                         validation_failures[k] = ["value mismatch"]
                 except MemcachedError:
                     validation_failures[k] = ["key not found"]
     return validation_failures
    def test_CASnotzero(self):
        """MB-31149: the CAS returned by getMeta must be non-zero after an
        append, both without and with the HELLO mutation-seqno feature.

        observe.observeseqnotests.ObserveSeqNoTests.test_CASnotzero
        """
        self.log.info('Starting test_CASnotzero')

        # without hello(mutationseqencenumber)
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        KEY_NAME = "test1key"
        client.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        client.generic_request(
            client.memcached(KEY_NAME).append, 'test1key', 'appended data')
        get_meta_resp = client.generic_request(
            client.memcached(KEY_NAME).getMeta, 'test1key')
        self.log.info(
            'the CAS value without hello(mutationseqencenumber): {} '.format(
                get_meta_resp[4]))
        self.assertNotEqual(get_meta_resp[4], 0)

        # with hello(mutationseqencenumber)
        KEY_NAME = "test2key"
        client.set(KEY_NAME, 0, 0, json.dumps({'value': 'value1'}))
        # negotiate the mutation-seqno feature; the response is not needed
        client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)
        client.generic_request(
            client.memcached(KEY_NAME).append, 'test2key', 'appended data456')

        get_meta_resp = client.generic_request(
            client.memcached(KEY_NAME).getMeta, 'test2key')
        self.log.info(
            'the CAS value with hello(mutationseqencenumber): {} '.format(
                get_meta_resp[4]))
        self.assertNotEqual(get_meta_resp[4], 0)
Example #6
0
    def do_get_random_key(self):
        """MB-31548: get_random_key would sometimes hang; hammer it a
        million times over a small populated bucket to prove it does not.
        """
        self.log.info("Creating few docs in the bucket")
        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        for i in range(1000):
            doc_key = "test_docs-" + str(i)
            client.memcached(doc_key).set(doc_key, 0, 0,
                                          json.dumps({'value': 'value1'}))

        self.log.info("Performing random_gets")
        mc = MemcachedClient(self.master.ip, 11210)
        mc.sasl_auth_plain(self.master.rest_username,
                           self.master.rest_password)
        mc.bucket_select('default')

        # One million calls; any MemcachedError fails the test immediately.
        for count in range(1, 1000001):
            try:
                mc.get_random_key()
            except MemcachedError as error:
                self.fail("<MemcachedError #%d ``%s''>" %
                          (error.status, error.message))
            if count % 1000 == 0:
                self.log.info('The number of iteration is {}'.format(count))
Example #7
0
 def insert_key(serverInfo, bucket_name, count, size):
     """Load count*1000 JSON documents into the bucket.

     Each document is {"value": <size-byte filler string>} stored under
     keys "key_0" .. "key_<count*1000 - 1>".
     """
     rest = RestConnection(serverInfo)
     smart = VBucketAwareMemcached(rest, bucket_name)
     # range (not py2-only xrange); the unused random flag was dropped.
     for i in range(count * 1000):
         key = "key_" + str(i)
         value = {"value": MemcachedClientHelper.create_value("*", size)}
         smart.memcached(key).set(key, 0, 0, json.dumps(value))
Example #8
0
 def insert_key(serverInfo, bucket_name, count, size):
     """Load count*1000 JSON documents into the bucket.

     Each document is {"value": <size-byte filler string>} stored under
     keys "key_0" .. "key_<count*1000 - 1>".
     """
     rest = RestConnection(serverInfo)
     smart = VBucketAwareMemcached(rest, bucket_name)
     # range (not py2-only xrange); the unused random flag was dropped.
     for i in range(count * 1000):
         key = "key_" + str(i)
         value = {"value": MemcachedClientHelper.create_value("*", size)}
         smart.memcached(key).set(key, 0, 0, json.dumps(value))
Example #9
0
    def test_meta_rebalance_out(self):
        """Rebalance out the node holding a key's active vbucket, then add
        it back, and verify the active CAS matches the replica CAS.

        NOTE(review): getMeta()[4] is treated as the CAS field throughout
        this file; max_cas comes from the vbucket-details stats.
        """
        KEY_NAME = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        # Mutate the key 10 times so CAS has advanced, and capture handles
        # to the active, master and replica connections for its vbucket.
        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
                                           json.dumps({'value': value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket(vbucket_id)
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        # The key's CAS must equal the vbucket's max_cas stat.
        max_cas = int(
            mc_active.stats('vbucket-details')[
                'vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        self.assertTrue(
            cas_active == max_cas,
            '[ERROR]Max cas  is not 0 it is {0}'.format(cas_active))

        # remove that node
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [],
                                                 [self.master])

        rebalance.result()
        # Read the CAS from the (former) replica while the active node is out.
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        get_meta_resp = mc_replica.getMeta(KEY_NAME,
                                           request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:],
                                                 [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        self.assertTrue(
            replica_CAS == active_CAS,
            'cas mismatch active: {0} replica {1}'.format(
                active_CAS, replica_CAS))
Example #10
0
    def test_MB_32114(self):
        """MB-32114: del_with_meta on documents carrying xattrs must leave
        active/replica item counts consistent after compaction.
        """
        try:
            from sdk_client import SDKClient
        except ImportError:  # py3 environments ship the client as sdk_client3
            from sdk_client3 import SDKClient
        import couchbase.subdocument as SD

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        if self.maxttl:
            self._expiry_pager(self.master)
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.master.ip],
                               bucket='default')
        KEY_NAME = 'key1'

        for i in range(1000):
            mcd = client.memcached(KEY_NAME + str(i))
            mcd.set(KEY_NAME + str(i), 0, 0,
                    json.dumps({'value': 'value2'}))
            # attach an xattr so del_with_meta exercises the MB-32114 path
            sdk_client.mutate_in(
                KEY_NAME + str(i),
                SD.upsert("subdoc_key",
                          "subdoc_val",
                          xattr=True,
                          create_parents=True))
            # wait for it to persist
            persisted = 0
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = client.observe(
                    KEY_NAME + str(i))

        start_time = time.time()
        self._load_doc_data_all_buckets(batch_size=1000)
        end_time = time.time()

        for i in range(1000):
            try:
                mcd = client.memcached(KEY_NAME + str(i))
                _, flags, exp, seqno, cas = client.memcached(
                    KEY_NAME + str(i)).getMeta(KEY_NAME + str(i))
                mcd.del_with_meta(KEY_NAME + str(i), 0, 0, 2, cas + 1)
            except MemcachedError as err:  # 'err', not 'exp': exp is expiry above
                self.fail("Exception with del_with meta - {0}".format(err))
        self.cluster.compact_bucket(self.master, "default")
        if self.maxttl:
            # let TTLs that started during the bulk load fully expire
            time_to_sleep = (self.maxttl - (end_time - start_time)) + 20
            self.sleep(int(time_to_sleep))
        else:
            self.sleep(60)
        active_bucket_items = rest.get_active_key_count("default")
        replica_bucket_items = rest.get_replica_key_count("default")
        print('active_bucket_items ', active_bucket_items)
        print('replica_bucket_items ', replica_bucket_items)
        if active_bucket_items * self.num_replicas != replica_bucket_items:
            self.fail("Mismatch in data !!!")
Example #11
0
    def test_meta_failover(self):
        """Fail over and rebalance out the node holding a key's active
        vbucket, add it back, and verify the active CAS matches the CAS
        read from the replica while the node was out.

        NOTE(review): getMeta()[4] is treated as the CAS field and
        getMeta()[5] as the conflict-resolution flag, matching the usage
        elsewhere in this file.
        """
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        # Mutate the key 10 times and grab active/master/replica handles.
        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        # The key's CAS must equal the vbucket's max_cas stat.
        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        self.assertTrue(cas_active == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_active))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [] ,[self.master])

        rebalance.result()
        time.sleep(60)

        # Read the CAS from the replica while the failed-over node is out.
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        #print 'replica CAS {0}'.format(replica_CAS)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')
    def corrupt_cas_is_healed_on_rebalance_out_in(self):
        """Corrupt max CAS on the node holding a key's active vbucket,
        rebalance that node out and back in, then verify the active CAS
        matches the CAS read from the replica (i.e. the corruption healed).
        """

        self.log.info('Start corrupt_cas_is_healed_on_rebalance_out_in')

        KEY_NAME = 'key1'


        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')

        # set a key
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,json.dumps({'value':'value1'}))


        # figure out which node it is on
        mc_active = client.memcached(KEY_NAME)
        mc_replica = client.memcached( KEY_NAME, replica_index=0 )


        # set the CAS to -2 and then mutate to increment to -1 and then it should stop there
        self._corrupt_max_cas(mc_active,KEY_NAME)

        # CAS should be 0 now, do some gets and sets to verify that nothing bad happens


        resp = mc_active.get(KEY_NAME)
        self.log.info( 'get for {0} is {1}'.format(KEY_NAME, resp))


        # remove that node
        self.log.info('Remove the node with -1 max cas')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [] ,[self.master])
        #rebalance = self.cluster.async_rebalance([self.master], [], self.servers[-1:])

        rebalance.result()
        # Read the CAS from the replica while the corrupted node is out.
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]



        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
        #rebalance = self.cluster.async_rebalance([self.master], self.servers[-1:],[])

        rebalance.result()


        # verify the CAS is good
        client = VBucketAwareMemcached(rest, 'default')
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]


        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
    def corrupt_cas_is_healed_on_rebalance_out_in(self):
        """Corrupt max CAS on the node holding a key's active vbucket,
        rebalance that node out and back in, then verify the active CAS
        matches the CAS read from the replica (i.e. the corruption healed).
        """

        self.log.info('Start corrupt_cas_is_healed_on_rebalance_out_in')

        KEY_NAME = 'key1'


        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')

        # set a key
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,json.dumps({'value':'value1'}))


        # figure out which node it is on
        mc_active = client.memcached(KEY_NAME)
        mc_replica = client.memcached( KEY_NAME, replica_index=0 )


        # set the CAS to -2 and then mutate to increment to -1 and then it should stop there
        self._corrupt_max_cas(mc_active,KEY_NAME)

        # CAS should be 0 now, do some gets and sets to verify that nothing bad happens


        resp = mc_active.get(KEY_NAME)
        self.log.info( 'get for {0} is {1}'.format(KEY_NAME, resp))


        # remove that node
        self.log.info('Remove the node with -1 max cas')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [] ,[self.master])
        #rebalance = self.cluster.async_rebalance([self.master], [], self.servers[-1:])

        rebalance.result()
        # Read the CAS from the replica while the corrupted node is out.
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]



        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
        #rebalance = self.cluster.async_rebalance([self.master], self.servers[-1:],[])

        rebalance.result()


        # verify the CAS is good
        client = VBucketAwareMemcached(rest, 'default')
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]


        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
Example #14
0
    def test_full_eviction_changed_to_value_eviction(self):
        """Flip buckets from full to value eviction via couchbase-cli and
        verify a plain set on the bucket fails while the change settles.
        """
        KEY_NAME = 'key1'

        gen_create = BlobGenerator('eviction', 'eviction-', self.value_size,
                                   end=self.num_items)
        gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size,
                                    end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)

        nodes = self.servers[:self.nodes_init]
        self._wait_for_stats_all_buckets(nodes)
        self._verify_stats_all_buckets(nodes)

        shell = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            # Change the eviction policy through couchbase-cli.
            output, _ = shell.execute_couchbase_cli(
                cli_command='bucket-edit',
                cluster_host="localhost:8091",
                user=self.master.rest_username,
                password=self.master.rest_password,
                options='--bucket=%s --bucket-eviction-policy=valueOnly'
                        % bucket.name)
            self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                            'Eviction policy wasn\'t changed')
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            nodes, self,
            wait_time=self.wait_timeout, wait_if_warmup=True)
        self.sleep(10, 'Wait some time before next load')

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        mcd = client.memcached(KEY_NAME)
        try:
            mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
            self.fail('Bucket is incorrectly functional')
        except MemcachedError:
            pass  # this is the exception we are hoping for
Example #15
0
    def key_not_exists_test(self):
        """Set/delete a key 1500 times; after each delete both get and
        replace must fail with ERR_NOT_FOUND (regression loop for a bug
        reproduced by replace but not by cas).
        """
        self.assertTrue(len(self.buckets) > 0, 'at least 1 bucket required')
        bucket = self.buckets[0].name
        client = VBucketAwareMemcached(RestConnection(self.master), bucket)
        KEY_NAME = 'key'

        for i in range(1500):
            client.set(KEY_NAME, 0, 0, "x")
            # delete and verify get fails
            client.delete(KEY_NAME)
            err = None
            try:
                rc = client.get(KEY_NAME)
            except MemcachedError as error:
                # It is expected to raise MemcachedError because the key is deleted.
                err = error.status
            self.assertTrue(err == ERR_NOT_FOUND,
                            'expected key to be deleted {0}'.format(KEY_NAME))

            # cas errors do not sleep the test for 10 seconds, plus we need
            # to check that the correct error is being thrown
            err = None
            try:
                # For some reason replace instead of cas would not reproduce the bug
                mc_active = client.memcached(KEY_NAME)
                mc_active.replace(KEY_NAME, 0, 10, "value")
            except MemcachedError as error:
                err = error.status
            self.assertTrue(
                err == ERR_NOT_FOUND,
                'was able to replace cas on removed key {0}'.format(KEY_NAME))
    def test_full_eviction_changed_to_value_eviction(self):
        """Flip buckets from full to value eviction via couchbase-cli and
        verify a plain set on the bucket fails while the change settles.
        """
        KEY_NAME = 'key1'

        gen_create = BlobGenerator('eviction', 'eviction-', self.value_size,
                                   end=self.num_items)
        gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size,
                                    end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)

        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])
        remote = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            output, _ = remote.execute_couchbase_cli(
                cli_command='bucket-edit',
                cluster_host="localhost",
                user=self.master.rest_username,
                password=self.master.rest_password,
                options='--bucket=%s --bucket-eviction-policy=valueOnly'
                        % bucket.name)
            self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                            'Eviction policy wasn\'t changed')
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            self.servers[:self.nodes_init], self,
            wait_time=self.wait_timeout, wait_if_warmup=True)
        self.sleep(10, 'Wait some time before next load')

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        mcd = client.memcached(KEY_NAME)
        try:
            rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
            self.fail('Bucket is incorrectly functional')
        except MemcachedError as e:  # py3 syntax; was "except MemcachedError, e:"
            pass  # this is the exception we are hoping for
Example #17
0
    def _getr_items(self, item_count, replica_count, prefix, vprefix=""):
        """Read item_count keys from each of replica_count replicas via
        getr, retrying on not-my-vbucket (status 7) after refreshing the
        vbucket map. Returns the number of successful replica reads.
        """
        time_start = time.time()
        get_count = 0
        last_error = ""
        error_count = 0
        awareness = VBucketAwareMemcached(self.rest, self.default_bucket_name)
        for r in range(replica_count):
            for i in range(item_count):
                retry = True

                key = prefix + "_key_" + str(i)
                while retry:
                    client = awareness.memcached(key, r)
                    try:
                        value = client.getr(key)[2]
                        assert(value == vprefix + "_value_" + str(i))
                        get_count += 1
                        retry = False
                    except mc_bin_client.MemcachedError as e:
                        last_error = "failed to getr key {0}, error: {1}".format(key, e)
                        error_count += 1
                        if e.status == 7:
                            # not-my-vbucket: refresh the map and retry
                            # (the original log line had an unfilled {0})
                            self.log.info("getting new vbucket map after error: {0}".format(e))
                            awareness.reset(self.rest)
                        else:
                            retry = False
                    except Exception as e:
                        last_error = "failed to getr key {0}, error: {1}".format(key, e)
                        error_count += 1
                        retry = False
        if error_count > 0:
            self.log.error("got {0} errors, last error: {1}".format(error_count, last_error))
        self.log.info("got {0} replica items in {1} seconds".format(get_count, time.time() - time_start))
        awareness.done()
        return get_count
Example #18
0
    def key_not_exists_test(self):
        """Set/delete a key repeatedly; each delete must make both get and
        replace fail with NOT_FOUND (regression loop for a CAS bug).
        """
        self.assertTrue(len(self.buckets) > 0, 'at least 1 bucket required')
        bucket = self.buckets[0].name
        client = VBucketAwareMemcached(RestConnection(self.master), bucket)
        KEY_NAME = 'key'

        for _ in range(1500):
            client.set(KEY_NAME, 0, 0, "x")
            # delete and verify get fails
            client.delete(KEY_NAME)

            status = None
            try:
                client.get(KEY_NAME)
            except MemcachedError as error:
                # Expected: the key was just deleted.
                status = error.status
            self.assertTrue(status == ERR_NOT_FOUND,
                            'expected key to be deleted {0}'.format(KEY_NAME))

            # cas errors do not sleep the test for 10 seconds, plus we need
            # to check that the correct error is being thrown
            status = None
            try:
                # For some reason replace instead of cas would not reproduce the bug
                mc_active = client.memcached(KEY_NAME)
                mc_active.replace(KEY_NAME, 0, 10, "value")
            except MemcachedError as error:
                status = error.status
            self.assertTrue(
                status == ERR_NOT_FOUND,
                'was able to replace cas on removed key {0}'.format(KEY_NAME))
Example #19
0
 def get_rev_info(rest_conn, bucket, keys):
     """Return the rev info (getRev) for each key, in the order given."""
     vbmc = VBucketAwareMemcached(rest_conn, bucket)
     return [vbmc.memcached(key).getRev(key) for key in keys]
Example #20
0
    def test_meta_hard_restart(self):
        """Mutate a key, hard-reboot the server, and verify the key's CAS
        survives the restart unchanged.

        NOTE(review): getMeta()[4] is treated as the CAS field, matching
        usage elsewhere in this file.
        """
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        # Mutate the key 10 times so CAS advances; keep the last CAS seen.
        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
                                           json.dumps({'value': value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket(vbucket_id)
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        # The key's CAS must equal the vbucket's max_cas stat before reboot.
        max_cas = int(
            mc_active.stats('vbucket-details')[
                'vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        self.assertTrue(cas_pre == max_cas,
                        '[ERROR]Max cas  is not 0 it is {0}'.format(cas_pre))

        # reboot nodes
        self._reboot_server()

        time.sleep(60)
        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME,
                                          request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(
            cas_pre == cas_post,
            'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post))
    def test_expiry_after_append(self):
        """Create a doc with a TTL, append to it, and verify the append
        does not change the stored expiry (getMeta field index 2).
        """
        self.key = "expiry_after_append"
        array = {
            "name": "Douglas Reynholm",
            "place": "India",
        }
        jsonDump = json.dumps(array)
        # 60-second TTL on the original document
        self.client.set(self.key, 60, 0, jsonDump)
        client1 = VBucketAwareMemcached(RestConnection(self.master), 'default')
        get_meta_resp_before = client1.generic_request(
            client1.memcached(self.key).getMeta, self.key)
        self.log.info("Sleeping for 5 sec")
        time.sleep(5)
        client1.generic_request(
            client1.memcached(self.key).append, self.key, 'appended data')
        get_meta_resp_after = client1.generic_request(
            client1.memcached(self.key).getMeta, self.key)
        # 3rd value is expiry value; assertEqual (assertEquals is deprecated)
        self.assertEqual(get_meta_resp_before[2], get_meta_resp_after[2])
Example #22
0
    def test_MB_36087(self):
        """MB-36087: evicting a document that carries xattrs and then doing
        del_with_meta on it must not raise unexpected memcached errors.
        """
        try:
            from sdk_client import SDKClient
        except ImportError:  # py3 environments ship the client as sdk_client3
            from sdk_client3 import SDKClient
        import couchbase.subdocument as SD

        g_key = "test_doc"
        bucket_name = "default"
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.master.ip],
                               bucket=bucket_name)
        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, bucket_name)
        for i in range(self.num_items):
            key = g_key + str(i)
            mcd = client.memcached(key)
            mcd.set(key, 0, 0, json.dumps({'value': 'value2'}))
            # attach an xattr so eviction + del_with_meta hits the MB path
            sdk_client.mutate_in(
                key,
                SD.upsert("subdoc_key",
                          "subdoc_val",
                          xattr=True,
                          create_parents=True))
            # Wait for key to persist
            persisted = 0
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = \
                    client.observe(key)

            time.sleep(10)
            # Evict the key
            try:
                mcd.evict_key(key)
            except MemcachedError as err:
                self.fail("Exception with evict meta - %s" % err)

            # Perform del_with_meta
            try:
                mcd = client.memcached(key)
                _, flags, exp, seqno, cas = client.memcached(key).getMeta(key)
                mcd.del_with_meta(key, 0, 0, 2, cas + 1)
            except MemcachedError as err:  # 'err', not 'exp': exp is expiry above
                self.fail("Exception with del_with meta - {0}".format(err))
 def _verify_es_results(self, bucket='default'):
     """Cross-check every document indexed in Elasticsearch against Couchbase.

     Pulls all docs from ES, fetches each corresponding key from the given
     Couchbase bucket, and asserts the JSON bodies are identical.
     """
     all_es_docs = self.esrest_conn.all_docs()
     self.log.info("Retrieved ES Docs")
     cb_conn = VBucketAwareMemcached(self.rest, bucket)
     self.log.info("Comparing CB and ES data")
     for es_doc in all_es_docs:
         source = es_doc['doc']
         doc_id = str(source['_id'])
         active_node = cb_conn.memcached(doc_id)
         cb_flags, cb_cas, cb_data = active_node.get(doc_id)
         self.assertDictEqual(source, json.loads(cb_data),
                              "Data mismatch found - es data: {0} cb data: {1}".
                              format(str(source), str(cb_data)))
     self.log.info("Data verified")
Example #24
0
 def _reader_thread(self, inserted_keys, bucket_data, moxi=False):
     """Read back every inserted key from every bucket.

     For each (bucket, key) pair, fetch the key either through a moxi proxy
     client (``moxi=True``) or through a vbucket-aware smart client
     (``moxi=False``). Read failures are logged and collected rather than
     raised.

     Returns the list of ``{"error": ..., "key": ...}`` dicts for failed
     reads (empty when every get succeeded).
     """
     errors = []
     rest = RestConnection(self._servers[0])
     smartclient = None
     moxi_client = None
     for name in bucket_data:
         for key in inserted_keys:
             # A fresh client is built per key, matching original behavior.
             # (Previously the boolean `moxi` parameter was rebound to the
             # proxy client object, shadowing the flag.)
             if moxi:
                 moxi_client = MemcachedClientHelper.proxy_client(self._servers[0], name)
             else:
                 smartclient = VBucketAwareMemcached(rest, name)
             try:
                 if moxi:
                     moxi_client.get(key)
                 else:
                     smartclient.memcached(key).get(key)
             except Exception as ex:
                 errors.append({"error": ex, "key": key})
                 self.log.info(ex)
                 # The smart client may be in a bad state after a failure;
                 # close it and build a replacement.
                 if not moxi:
                     smartclient.done()
                     smartclient = VBucketAwareMemcached(rest, name)
     return errors
    def test_meta_hard_restart(self):
        """Verify that a key's CAS survives a hard server reboot.

        Mutates a key ten times, confirms the final CAS equals the vbucket's
        max_cas stat, reboots the node, then checks that the CAS and the
        extended-metadata conflict-resolution flag are intact.
        """
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for iteration in range(10):
            # Mutate the key and capture the CAS after each write.
            client.memcached(KEY_NAME).set(
                KEY_NAME, 0, 0,
                json.dumps({'value': 'value' + str(iteration)}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket(vbucket_id)
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)
            cas_pre = mc_active.getMeta(KEY_NAME)[4]

        stat_key = 'vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'
        max_cas = int(mc_active.stats('vbucket-details')[stat_key])

        self.assertTrue(cas_pre == max_cas,
                        '[ERROR]Max cas  is not 0 it is {0}'.format(cas_pre))

        # Reboot the node(s) and give the cluster time to come back up.
        self._reboot_server()
        time.sleep(60)

        # Rebuild the client so we pick up the post-reboot topology, then
        # verify the CAS value is unchanged.
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]

        get_meta_resp = mc_active.getMeta(KEY_NAME,
                                          request_extended_meta_data=True)

        self.assertTrue(cas_pre == cas_post,
                        'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post))
        self.assertTrue(get_meta_resp[5] == 1,
                        msg='Metadata indicate conflict resolution is not set')
Example #26
0
 def _verify_es_results(self, bucket='default'):
     """Compare all Elasticsearch documents with the source Couchbase bucket.

     Builds fresh ES (destination) and Couchbase (source) connections, then
     asserts each ES document's JSON body equals the matching CB document.
     """
     es_rest = EsRestConnection(self.dest_master)
     docs_from_es = es_rest.all_docs()
     self.log.info("Retrieved ES Docs")
     cb_rest = RestConnection(self.src_master)
     cb_conn = VBucketAwareMemcached(cb_rest, bucket)
     self.log.info("Comparing CB and ES data")
     for entry in docs_from_es:
         body = entry['doc']
         doc_id = str(body['_id'])
         node = cb_conn.memcached(doc_id)
         cb_flags, cb_cas, cb_data = node.get(doc_id)
         self.assertDictEqual(body, json.loads(cb_data),
                              "Data mismatch found - es data: {0} cb data: {1}".
                              format(str(body), str(cb_data)))
     self.log.info("Data verified")
    def do_basic_ops(self):
        """Exercise basic memcached operations that previously triggered MBs.

        Covers incr on a key under full eviction (MB-17231) and
        del_with_meta after eviction (MB-17289).
        """
        KEY_NAME = 'key1'
        KEY_NAME2 = 'key2'
        CAS = 1234
        self.log.info('Starting basic ops')

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        mcd = client.memcached(KEY_NAME)

        # MB-17231 - incr with full eviction
        # (print statements converted to print() calls for consistency with
        # the Python-3 version of this test elsewhere in the file.)
        rc = mcd.incr(KEY_NAME, 1)
        print('rc for incr', rc)

        # MB-17289 del with meta
        rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        print('set is', rc)
        cas = rc[1]

        # wait for it to persist before evicting
        persisted = 0
        while persisted == 0:
            opaque, rep_time, persist_time, persisted, cas = client.observe(KEY_NAME)

        try:
            rc = mcd.evict_key(KEY_NAME)
        except MemcachedError as exp:
            self.fail("Exception with evict meta - {0}".format(exp))

        CAS = 0xabcd
        # key, value, exp, flags, seqno, remote_cas
        try:
            # key, exp, flags, seqno, cas
            rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
        except MemcachedError as exp:
            self.fail("Exception with del_with meta - {0}".format(exp))
Example #28
0
    def do_basic_ops(self):
        """Run the basic-ops regression checks (MB-17231, MB-17289)."""
        KEY_NAME = 'key1'
        KEY_NAME2 = 'key2'
        CAS = 1234
        self.log.info('Starting basic ops')

        conn = VBucketAwareMemcached(RestConnection(self.master), 'default')
        mcd = conn.memcached(KEY_NAME)

        # MB-17231 - incr with full eviction
        rc = mcd.incr(KEY_NAME, 1)
        print('rc for incr', rc)

        # MB-17289 del with meta
        rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        print('set is', rc)
        cas = rc[1]

        # Block until the item has been persisted to disk.
        persisted = 0
        while not persisted:
            opaque, rep_time, persist_time, persisted, cas = conn.observe(
                KEY_NAME)

        time.sleep(10)
        try:
            rc = mcd.evict_key(KEY_NAME)
        except MemcachedError as exp:
            self.fail("Exception with evict meta - {0}".format(exp))

        CAS = 0xabcd
        # key, value, exp, flags, seqno, remote_cas
        try:
            # key, exp, flags, seqno, cas
            rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
        except MemcachedError as exp:
            self.fail("Exception with del_with meta - {0}".format(exp))
    def test_full_eviction_changed_to_value_eviction(self):
        """Switch buckets from full to value eviction and verify no key loss.

        Loads items, flips each bucket's eviction policy via the CLI, waits
        for warmup, and asserts the tracked key counts are unchanged. Finally
        checks that a plain set on the bucket raises (the expected state
        right after the policy change).
        """
        KEY_NAME = 'key1'
        gen_create = BlobGenerator(
            'superlongnameofkey1234567890123456789012345678902',
            'eviction-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)

        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])

        total_keys_full_eviction = sum(
            len(bucket.kvs[1].key_set()[0]) for bucket in self.buckets)

        remote = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            output, _ = remote.execute_couchbase_cli(
                cli_command='bucket-edit',
                cluster_host="localhost:8091",
                user=self.master.rest_username,
                password=self.master.rest_password,
                options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
            self.assertTrue(' '.join(output).find('SUCCESS') != -1,
                            'Eviction policy wasn\'t changed')
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            self.servers[:self.nodes_init], self,
            wait_time=self.wait_timeout, wait_if_warmup=True)

        total_keys_value_eviction = sum(
            len(bucket.kvs[1].key_set()[0]) for bucket in self.buckets)

        self.assertTrue(
            total_keys_full_eviction == total_keys_value_eviction,
            msg="Keys before and after eviction policy change from fullEviction to valueOnly differ")

        client = VBucketAwareMemcached(RestConnection(self.master), "default")
        mcd = client.memcached(KEY_NAME)
        try:
            rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
            # self.fail(rc) # Bucket is incorrectly functioning
        except MemcachedError as e:
            pass  # this is the exception we are hoping for
    def test_appendprepend(self):
        """MB-32078: append with CAS=0 must not return ENGINE_KEY_EEXISTS.

        observe.observeseqnotests.ObserveSeqNoTests.test_appendprepend
        """
        self.log.info('Starting test_appendprepend')
        TEST_SEQNO = 123
        TEST_CAS = 456
        KEY_NAME = 'test_appendprepend'

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')

        def request(op_name, *args):
            # Resolve the key's current memcached connection and issue the
            # named operation through generic_request.
            return client.generic_request(
                getattr(client.memcached(KEY_NAME), op_name), KEY_NAME, *args)

        # Create a value that carries a non-zero CAS via set_with_meta.
        request('set_with_meta', 0, 0, TEST_SEQNO, TEST_CAS, '123456789')
        self.assertEqual(request('get')[2], '123456789')

        # A set_with_meta with CAS 0 must be rejected (status 2, KEY_EEXISTS).
        try:
            request('set_with_meta', 0, 0, 0, 0, 'value')
            self.fail('Expected to fail but passed')
        except MemcachedError as exp:
            self.assertEqual(int(exp.status), 2)

        # Append must still succeed despite the meta CAS.
        request('append', 'appended data')
        self.assertEqual(request('get')[2], '123456789appended data')

        # Prepend must succeed as well; verify the final value.
        request('prepend', 'prepended data')
        self.assertEqual(request('get')[2],
                         'prepended data123456789appended data')
    def _run_observe(self):
        """Mutate keys per self.mutate_by, wait for persistence via observe,
        validate through a view query, then optionally delete items
        (serially or in parallel) and re-validate.
        """
        tasks = []
        query_set = "true"
        mutated = False
        count = 0
        for bucket in self.buckets:
            self.cluster.create_view(self.master, self.default_design_doc,
                                     self.default_view, bucket, self.wait_timeout * 2)
            client = VBucketAwareMemcached(RestConnection(self.master), bucket)
            self.max_time = timedelta(microseconds=0)
            if self.mutate_by == "multi_set":
                key_val = self._create_multi_set_batch()
                client.setMulti(0, 0, key_val)
            keys = ["observe%s" % (i) for i in xrange(self.num_items)]
            for key in keys:
                mutated = False
                # Retry the mutation while the server reports a temporary
                # failure (status 134); 'count' caps retries across all keys.
                while not mutated and count < 60:
                    try:
                        if self.mutate_by == "set":
                            client.set(key, 0, 0, "setvalue")
                        elif self.mutate_by == "append":
                            client.memcached(key).append(key, "append")
                        elif self.mutate_by == "prepend":
                            client.memcached(key).prepend(key, "prepend")
                        elif self.mutate_by == "incr":
                            client.memcached(key).incr(key, 1)
                        elif self.mutate_by == "decr":
                            client.memcached(key).decr(key)
                        mutated = True
                        t_start = datetime.now()
                    except MemcachedError as error:
                        if error.status == 134:
                            loaded = False
                            self.log.error("Memcached error 134, wait for 5 seconds and then try again")
                            count += 1
                            time.sleep(5)
                # BUG FIX: 'persisted' must be reset for every key.
                # Previously it was initialised once before the loop, so
                # only the first key actually waited for persistence.
                persisted = 0
                while persisted == 0:
                    opaque, rep_time, persist_time, persisted, cas = client.observe(key)
                t_end = datetime.now()
                self.log.info("##########key:-%s################" % (key))
                self.log.info("Persisted:- %s" % (persisted))
                # BUG FIX: log the persist time (previously logged rep_time
                # under the 'Persist_Time' label).
                self.log.info("Persist_Time:- %s" % (persist_time))
                self.log.info("Time2:- %s" % (t_end - t_start))
                if self.max_time <= (t_end - t_start):
                    self.max_time = (t_end - t_start)
                    self.log.info("Max Time taken for observe is :- %s" % self.max_time)
                    self.log.info("Cas Value:- %s" % (cas))
            query = {"stale": "false", "full_set": "true", "connection_timeout": 60000}
            self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query, self.num_items, bucket, timeout=self.wait_timeout)
            self.log.info("Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s" % (self.default_view, bucket))
            # check whether observe has to run with delete and delete parallel with observe or not
            if len(self.observe_with) > 0:
                if self.observe_with == "delete":
                    self.log.info("Deleting 0- %s number of items" % (self.num_items / 2))
                    self._load_doc_data_all_buckets('delete', 0, self.num_items / 2)
                    query_set = "true"
                elif self.observe_with == "delete_parallel":
                    self.log.info("Deleting Parallel 0- %s number of items" % (self.num_items / 2))
                    tasks = self._async_load_doc_data_all_buckets('delete', 0, self.num_items / 2)
                    query_set = "false"
                for key in keys:
                    opaque, rep_time, persist_time, persisted, cas = client.memcached(key).observe(key)
                    self.log.info("##########key:-%s################" % (key))
                    self.log.info("Persisted:- %s" % (persisted))
                if self.observe_with == "delete_parallel":
                    for task in tasks:
                        task.result()

                query = {"stale": "false", "full_set": query_set, "connection_timeout": 60000}
                self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query, self.num_items / 2, bucket, timeout=self.wait_timeout)
                self.log.info("Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s" % (self.default_view, self.default_bucket_name))
        """test_observe_basic_data_load_delete will test observer basic scenario
class SyncReplicationTest(unittest.TestCase):
    """Tests for the memcached sync_replication command under various
    replica configurations."""

    # Vbucket-aware client created by common_setup(); released in tearDown().
    awareness = None

    def common_setup(self, replica):
        """Reset the cluster, create a 'default' bucket with the given
        replica count, rebalance all nodes in, and build the client."""
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        first = self._servers[0]
        self.log = logger.Logger().get_logger()
        self.log.info(self._input)
        rest = RestConnection(first)
        for server in self._servers:
            RestHelper(RestConnection(server)).is_ns_server_running()

        ClusterOperationHelper.cleanup_cluster(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers, self._input.membase_settings, self)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        info = rest.get_nodes_self()
        # Give the bucket 3/4 of the node's reserved memcached memory.
        bucket_ram = info.mcdMemoryReserved * 3 / 4
        rest.create_bucket(bucket="default",
                           ramQuotaMB=int(bucket_ram),
                           replicaNumber=replica,
                           proxyPort=rest.get_nodes_self().moxi)
        msg = "wait_for_memcached fails"
        # BUG FIX: a trailing comma previously made 'ready' a one-element
        # tuple, so this assertion could never fail.
        ready = BucketOperationHelper.wait_for_memcached(first, "default")
        self.assertTrue(ready, msg)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(rebalanceStarted,
                        "unable to start rebalance on master node {0}".format(first.ip))
        self.log.info('started rebalance operation on master node {0}'.format(first.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        # without a bucket this seems to fail
        self.assertTrue(rebalanceSucceeded,
                        "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        self.awareness = VBucketAwareMemcached(rest, "default")

    def tearDown(self):
        """Release the client and wipe the cluster state."""
        if self.awareness:
            self.awareness.done()
            ClusterOperationHelper.cleanup_cluster(self._servers)
            BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)

    def test_one_replica(self):
        """sync_replication with one replica should succeed for every key."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 1024)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.sync_replication(1, [{"key": k, "vbucket": vBucket}])
        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)

    def test_one_replica_one_node(self):
        pass

    def test_one_replica_multiple_nodes(self):
        pass

    def test_one_replica_bucket_replica_one(self):
        pass

    def test_two_replica(self):
        self._unsupported_replicas(2)

    def test_three_replica(self):
        # BUG FIX: previously passed 1, which exercised the supported
        # single-replica case instead of the unsupported three-replica one.
        self._unsupported_replicas(3)

    def _unsupported_replicas(self, replica):
        """sync_replication with more replicas than configured must raise."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 102400)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.get(k)
            try:
                mc.sync_replication(replica, [{"key": k, "vbucket": vBucket}])
                msg = "server did not raise an error when running sync_replication with {0} replicas"
                self.fail(msg.format(replica))
            except MemcachedError as error:
                self.log.info("error {0} {1} as expected".format(error.status, error.msg))

        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)

    def test_invalid_key(self):
        pass

    def test_not_your_vbucket(self):
        """sync_replication against a node that does not own the vbucket
        must report an error for every request."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 1024)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            not_your_vbucket_mc = self.awareness.not_my_vbucket_memcached(k)
            try:
                count = 0
                expected_error = 0
                while count < 100:
                    a, b, response = not_your_vbucket_mc.sync_replication(1,
                        [{"key": k, "vbucket": vBucket}])
                    count += 1
                    self.log.info("response : {0}".format(response))
                    if response and response[0]["event"] != "invalid key":
                        expected_error += 1
                # BUG FIX: was 'is not 100' — identity comparison on an int,
                # which only works by accident of CPython int caching.
                if expected_error != 100:
                    self.fail(msg="server did not raise an error when running sync_replication with invalid vbucket")
            except MemcachedError as error:
                self.log.error(error)

    def test_some_invalid_keys(self):
        pass

    # NOTE(review): name looks like a typo for 'test_some_not_your_vbucket';
    # kept as-is to preserve the public interface.
    def stest_ome_not_your_vbucket(self):
        pass

    def test_some_large_values(self):
        pass

    def test_too_many_keys(self):
        pass

    def test_singlenode(self):
        pass
    def test_new_response_fields(self):
        """Verify mutation responses carry vbucket-UUID and seqno fields.

        Exercises in-place ops (set/replace/append/prepend/delete), the
        *_with_meta commands, and integer incr/decr/add, checking that each
        response's vbucket UUID and seqno line up with the prior mutation.
        """
        self.log.info('\n\nStarting test_new_response_fields')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        client.sendHellos(memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)

        def mc(key):
            # Shorthand for the memcached connection owning the key's vbucket.
            return client.memcached(key)

        set_resp = self.extract_vbucket_uuid_and_seqno(
            client.set('test1key', 0, 0, '123456789'))

        # In-place operations: each mutation should keep the vbucket UUID
        # and advance the seqno relative to the previous one.
        client.generic_request(mc('test1key').set,
                               'test1key', 0, 0, 'totally new value')
        replace_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(mc('test1key').replace,
                                   'test1key', 0, 0, 'totally new value'))
        self.verify_vbucket_and_seqno(set_resp, replace_resp, 'replace')

        append_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(mc('test1key').append,
                                   'test1key', 'appended data'))
        self.verify_vbucket_and_seqno(replace_resp, append_resp, 'append')

        prepend_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(mc('test1key').prepend,
                                   'test1key', 'prepended data'))
        self.verify_vbucket_and_seqno(append_resp, prepend_resp, 'prepend')

        # Finally delete the key and check its response fields too.
        delete_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(mc('test1key').delete, 'test1key'))
        self.verify_vbucket_and_seqno(set_resp, delete_resp, 'delete')

        # The 'meta' commands: set_with_meta / getMeta / del_with_meta.
        TEST_SEQNO = 123
        TEST_CAS = 456

        set_with_meta_resp = client.generic_request(
            mc('test1keyformeta').set_with_meta,
            'test1keyformeta', 0, 0, TEST_SEQNO, TEST_CAS, '123456789')
        meta_uuid, meta_seqno = struct.unpack('>QQ', set_with_meta_resp[2])
        set_with_meta_dict = {'vbucket_uuid': meta_uuid, 'seqno': meta_seqno}

        get_meta_resp = client.generic_request(mc('test1keyformeta').getMeta,
                                               'test1keyformeta')
        self.assertTrue(
            TEST_SEQNO == get_meta_resp[3],
            msg='get meta seqno does not match as set. Expected {0}, actual {1}'.format(TEST_SEQNO, get_meta_resp[3]))
        self.assertTrue(
            TEST_CAS == get_meta_resp[4],
            msg='get meta cas does not match as set. Expected {0}, actual {1}'.format(TEST_CAS, get_meta_resp[4]))

        # del_with_meta(key, exp, flags, seqno, old_cas, new_cas)
        del_with_meta_resp = client.generic_request(
            mc('test1keyformeta').del_with_meta,
            'test1keyformeta', 0, 0, TEST_SEQNO, TEST_CAS, TEST_CAS + 1)
        del_uuid, del_seqno = struct.unpack('>QQ', del_with_meta_resp[2])
        del_with_meta_dict = {'vbucket_uuid': del_uuid, 'seqno': del_seqno}

        self.verify_vbucket_and_seqno(set_with_meta_dict, del_with_meta_dict,
                                      'set/del with meta')

        # Integer operations.
        set_resp = self.extract_vbucket_uuid_and_seqno(
            client.set('key-for-integer-value', 0, 0, '123'))
        incr_resp = client.generic_request(mc('key-for-integer-value').incr,
                                           'key-for-integer-value')
        incr_resp_dict = {'vbucket_uuid': incr_resp[2], 'seqno': incr_resp[3]}
        self.verify_vbucket_and_seqno(set_resp, incr_resp_dict, 'incr')

        decr_resp = client.generic_request(mc('key-for-integer-value').decr,
                                           'key-for-integer-value')
        decr_resp_dict = {'vbucket_uuid': decr_resp[2], 'seqno': decr_resp[3]}
        self.verify_vbucket_and_seqno(incr_resp_dict, decr_resp_dict, 'decr')

        # add() of a brand-new key must also report a vbucket uuid.
        add_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(mc('totally new key').add,
                                   'totally new key', 0, 0, 'totally new value'))
        self.assertTrue(add_resp['vbucket_uuid'] > 0,
                        msg='Add request vbucket uuid is zero')

        self.log.info('\n\nComplete test_new_response_fields\n\n')
Example #34
0
    def verify_cas(self, ops, generator):
        """Verify CAS value manipulation.

        For "update"/"touch" we use the latest CAS value returned by get()
        to mutate the item again and confirm no exception is raised: the
        latest CAS must always be usable. We also confirm the per-vbucket
        max_cas stat matches on both active and replica nodes.
        For "delete", after it is called we try to mutate the item with the
        CAS value returned by delete(); a MemcachedError (not-found) is
        expected, otherwise the test fails.
        For "expire", we verify that the latest CAS value of an expired item
        can no longer be used to mutate it.

        ops       -- one of "update", "touch", "delete", "expire"
        generator -- document generator yielding (key, value) pairs
        """

        for bucket in self.buckets:
            client = VBucketAwareMemcached(RestConnection(self.master),
                                           bucket.name)
            gen = generator
            cas_error_collection = []
            data_error_collection = []
            while gen.has_next():
                key, value = next(gen)

                if ops in ["update", "touch"]:
                    # Mutate the same key repeatedly; each mutation must
                    # produce a new CAS.
                    for x in range(self.mutate_times):
                        o_old, cas_old, d_old = client.get(key)
                        if ops == 'update':
                            client.memcached(key).cas(
                                key, 0, 0, cas_old,
                                "{0}-{1}".format("mysql-new-value", x))
                        else:
                            client.memcached(key).touch(key, 10)

                        o_new, cas_new, d_new = client.memcached(key).get(key)
                        # A successful mutation must bump the CAS.
                        if cas_old == cas_new:
                            print('cas did not change')
                            cas_error_collection.append(cas_old)

                        if ops == 'update':
                            # Value must reflect the latest mutation.
                            if d_new != "{0}-{1}".format("mysql-new-value", x):
                                data_error_collection.append(
                                    (d_new,
                                     "{0}-{1}".format("mysql-new-value", x)))
                            if cas_old != cas_new and d_new == "{0}-{1}".format(
                                    "mysql-new-value", x):
                                self.log.info(
                                    "Use item cas {0} to mutate the same item with key {1} successfully! Now item cas is {2} "
                                    .format(cas_old, key, cas_new))

                        mc_active = client.memcached(key)
                        mc_replica = client.memcached(key, replica_index=0)

                        # The active node's vbucket max_cas must equal the
                        # CAS of the latest mutation.
                        active_cas = int(
                            mc_active.stats('vbucket-details')
                            ['vb_' + str(client._get_vBucket_id(key)) +
                             ':max_cas'])
                        self.assertTrue(
                            active_cas == cas_new,
                            'cbstats cas mismatch. Expected {0}, actual {1}'.
                            format(cas_new, active_cas))

                        replica_cas = int(
                            mc_replica.stats('vbucket-details')
                            ['vb_' + str(client._get_vBucket_id(key)) +
                             ':max_cas'])

                        # Replication may lag; poll up to 5 seconds for the
                        # replica's max_cas to catch up to the active's.
                        poll_count = 0
                        while replica_cas != active_cas and poll_count < 5:
                            time.sleep(1)
                            replica_cas = int(
                                mc_replica.stats('vbucket-details')
                                ['vb_' + str(client._get_vBucket_id(key)) +
                                 ':max_cas'])
                            poll_count = poll_count + 1

                        if poll_count > 0:
                            self.log.info(
                                'Getting the desired CAS was delayed {0} seconds'
                                .format(poll_count))

                        self.assertTrue(
                            active_cas == replica_cas,
                            'replica cas mismatch. Expected {0}, actual {1}'.
                            format(cas_new, replica_cas))

                elif ops == "delete":
                    o, cas, d = client.memcached(key).delete(key)
                    time.sleep(10)
                    self.log.info(
                        "Delete operation set item cas with key {0} to {1}".
                        format(key, cas))
                    try:
                        # Mutating a deleted item with its last CAS must fail.
                        client.memcached(key).cas(key, 0, self.item_flag, cas,
                                                  value)
                        raise Exception(
                            "The item should already be deleted. We can't mutate it anymore"
                        )
                    except MemcachedError as error:
                        # It is expected to raise MemcachedError because the key is deleted.
                        if error.status == ERR_NOT_FOUND:
                            self.log.info("<MemcachedError #%d ``%s''>" %
                                          (error.status, error.msg))
                            pass
                        else:
                            raise Exception(error)
                elif ops == "expire":
                    o, cas, d = client.memcached(key).set(
                        key, self.expire_time, 0, value)
                    # Let the item expire before attempting the mutation.
                    time.sleep(self.expire_time + 1)
                    self.log.info(
                        "Try to mutate an expired item with its previous cas {0}"
                        .format(cas))
                    try:
                        client.memcached(key).cas(key, 0, self.item_flag, cas,
                                                  value)
                        raise Exception(
                            "The item should already be expired. We can't mutate it anymore"
                        )
                    except MemcachedError as error:
                        # It is expected to raise MemcachedError because the key is expired.
                        if error.status == ERR_NOT_FOUND:
                            self.log.info("<MemcachedError #%d ``%s''>" %
                                          (error.status, error.msg))
                            pass
                        else:
                            raise Exception(error)

            # Report every CAS that failed to change, then fail the test.
            if len(cas_error_collection) > 0:
                for cas_value in cas_error_collection:
                    self.log.error(
                        "Set operation fails to modify the CAS {0}".format(
                            cas_value))
                raise Exception("Set operation fails to modify the CAS value")
            if len(data_error_collection) > 0:
                for data_value in data_error_collection:
                    self.log.error(
                        "Set operation fails. item-value is {0}, expected is {1}"
                        .format(data_value[0], data_value[1]))
                raise Exception("Set operation fails to change item value")
    def test_failover(self):
        """Verify observe_seqno responses stay correct across failovers.

        Scenario 1: set a key, gracefully fail over the node owning it,
        then check the observed vbucket uuid/seqno against the values
        captured at mutation time (formatted as 'no_failover').
        Scenario 2: repeat with a hard failover and expect the response
        to match the 'hard_failover' format.
        """

        self.log.info('\n\nStarting test_failover')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        # advertise mutation-seqno support so set() returns vbucket uuid/seqno
        h = client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)

        self.log.info(
            '\n\nVerify responses are correct after graceful failover')

        # capture the vbucket uuid/seqno returned by the mutation itself
        op_data = self.extract_vbucket_uuid_and_seqno(
            client.set('failoverkey', 0, 0, 'failovervalue'))
        op_data['format_type'] = 'no_failover'

        # don't really need to do this so it is commented
        #pre_failover_results = self.observe_seqno_response_to_dict( client.observe_seqno('failoverkey', vbucket_uuid) )

        # which server did the key go to and gracefully fail that server

        self.log.info('\n\nstarting graceful failover scenario')
        server_with_key = client.memcached('failoverkey').host
        self.log.info(
            '\n\nserver {0} has the key and it will be failed over'.format(
                server_with_key))

        # graceful failover requires data to be persisted first
        RebalanceHelper.wait_for_persistence(self.master,
                                             self.default_bucket_name)

        # now failover
        RestConnection(self.master).fail_over(otpNode='ns_1@' +
                                              server_with_key,
                                              graceful=True)

        # NOTE(review): server_with_key is a host string; confirm
        # self.servers holds comparable values for this membership test
        if server_with_key in self.servers:
            self.servers.remove(server_with_key)

        self.log.info('server should be failed over now')

        time.sleep(5)
        # reinstantiate the client so we get the new view of the world
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        server_with_key = client.memcached('failoverkey').host
        self.log.info('\n\nkey is now on server {0}'.format(server_with_key))

        after_failover_results = self.observe_seqno_response_to_dict(
            client.observe_seqno('failoverkey', op_data['vbucket_uuid']))

        # verify: no (hard) failover, everything else as before
        self.check_results(op_data, after_failover_results)
        self.log.info('Test complete')

        # now do a hard failover

        # which server did the key go to and gracefully fail that server

        time.sleep(30)
        self.log.info('\n\nstarting hard failover scenario')

        # re-negotiate features on the fresh connection
        client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)
        op_data = self.extract_vbucket_uuid_and_seqno(
            client.set('hardfailoverkey', 0, 0, 'failovervalue'))
        op_data['format_type'] = 'hard_failover'

        server_with_key = client.memcached('hardfailoverkey').host
        self.log.info(
            '\n\nserver {0} has the key and it will be hard failed over'.
            format(server_with_key))

        # now failover
        RestConnection(self.master).fail_over(otpNode='ns_1@' +
                                              server_with_key,
                                              graceful=False)

        if server_with_key in self.servers:
            self.servers.remove(server_with_key)

        self.log.info('\n\nserver should be failed over now')

        time.sleep(10)
        # reinstantiate the client so we get the new view of the world
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        server_with_key = client.memcached('hardfailoverkey').host
        self.log.info('\n\nkey is now on server {0}'.format(server_with_key))

        time.sleep(10)

        after_failover_results = self.observe_seqno_response_to_dict(
            client.observe_seqno('hardfailoverkey', op_data['vbucket_uuid']))

        self.check_results(op_data, after_failover_results)

        self.log.info('Test complete')
    def verify_cas(self, ops, generator):
        """Verify CAS value manipulation for a given operation type.

        ops:
            'update' - re-mutate each item via cas() using the latest CAS;
                       the mutation must succeed and produce a new CAS.
            'touch'  - touch each item; the CAS must still change.
            'delete' - delete each item, then verify that a cas() mutation
                       using the CAS returned by delete() fails with
                       ERR_NOT_FOUND.
            'expire' - set each item with an expiry, wait for it to expire,
                       then verify that a cas() mutation with the stale CAS
                       fails with ERR_NOT_FOUND.
        generator: a key/value document generator exposing has_next()/next().

        Raises Exception if any CAS fails to change on mutation or any item
        value does not match the expected mutated value.
        """
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')

        for bucket in self.buckets:
            gen = generator
            cas_error_collection = []
            data_error_collection = []
            while gen.has_next():
                key, value = gen.next()

                if ops in ["update", "touch"]:
                    for x in range(self.mutate_times):
                        o_old, cas_old, d_old = client.get(key)
                        if ops == 'update':
                            client.memcached(key).cas(key, 0, 0, cas_old, "{0}-{1}".format("mysql-new-value", x))
                        else:
                            rc = client.memcached(key).touch(key, 10)

                        o_new, cas_new, d_new = client.memcached(key).get(key)
                        if cas_old == cas_new:
                            # every mutation (including touch) must bump the CAS
                            # (was a Python 2 `print` statement, a SyntaxError
                            # under Python 3; use the test logger instead)
                            self.log.info('cas did not change')
                            cas_error_collection.append(cas_old)

                        if ops == 'update':
                            if d_new != "{0}-{1}".format("mysql-new-value", x):
                                data_error_collection.append((d_new, "{0}-{1}".format("mysql-new-value", x)))
                            if cas_old != cas_new and d_new == "{0}-{1}".format("mysql-new-value", x):
                                self.log.info("Use item cas {0} to mutate the same item with key {1} successfully! Now item cas is {2} "
                                              .format(cas_old, key, cas_new))

                        # cross-check the item CAS against the vbucket max_cas
                        # on both the active and the replica copy
                        mc_active = client.memcached(key)
                        mc_replica = client.memcached(key, replica_index=0)

                        active_cas = int(mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(key)) + ':max_cas'])
                        self.assertTrue(active_cas == cas_new,
                                        'cbstats cas mismatch. Expected {0}, actual {1}'.format(cas_new, active_cas))

                        # replication is asynchronous; poll up to 5 seconds for
                        # the replica's max_cas to catch up with the active one
                        replica_cas = int(mc_replica.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(key)) + ':max_cas'])
                        poll_count = 0
                        while replica_cas != active_cas and poll_count < 5:
                            time.sleep(1)
                            replica_cas = int(mc_replica.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(key)) + ':max_cas'])
                            poll_count = poll_count + 1

                        if poll_count > 0:
                            self.log.info('Getting the desired CAS was delayed {0} seconds'.format(poll_count))

                        # fixed message: report the compared value (active_cas),
                        # not cas_new, on failure
                        self.assertTrue(active_cas == replica_cas,
                                        'replica cas mismatch. Expected {0}, actual {1}'.format(active_cas, replica_cas))

                elif ops == "delete":
                    o, cas, d = client.memcached(key).delete(key)
                    time.sleep(10)
                    self.log.info("Delete operation set item cas with key {0} to {1}".format(key, cas))
                    try:
                        client.memcached(key).cas(key, 0, self.item_flag, cas, value)
                        raise Exception("The item should already be deleted. We can't mutate it anymore")
                    except MemcachedError as error:
                        # expected: the key is deleted, so the mutation must fail
                        if error.status == ERR_NOT_FOUND:
                            self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.msg))
                        else:
                            raise Exception(error)
                elif ops == "expire":
                    o, cas, d = client.memcached(key).set(key, self.expire_time, 0, value)
                    time.sleep(self.expire_time + 1)
                    self.log.info("Try to mutate an expired item with its previous cas {0}".format(cas))
                    try:
                        client.memcached(key).cas(key, 0, self.item_flag, cas, value)
                        raise Exception("The item should already be expired. We can't mutate it anymore")
                    except MemcachedError as error:
                        # expected: the key is expired, so the mutation must fail
                        if error.status == ERR_NOT_FOUND:
                            self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.msg))
                        else:
                            raise Exception(error)

            if len(cas_error_collection) > 0:
                for cas_value in cas_error_collection:
                    self.log.error("Set operation fails to modify the CAS {0}".format(cas_value))
                raise Exception("Set operation fails to modify the CAS value")
            if len(data_error_collection) > 0:
                for data_value in data_error_collection:
                    self.log.error("Set operation fails. item-value is {0}, expected is {1}".format(data_value[0], data_value[1]))
                raise Exception("Set operation fails to change item value")
# Exemple #37
# 0
    def test_failover_revid_conflict_resolution(self):
        """Force rev-id conflict resolution, fail over the active node, and
        verify the key's CAS is preserved.

        Steps:
          1. Set a key and record its CAS, rev-seqno and the vbucket max_cas.
          2. setWithMeta with a matching CAS but a higher rev-seqno, forcing
             conflict resolution to fall back to rev-id; the incoming
             mutation should win (seqno increases, max_cas unchanged).
          3. Fail over and rebalance out the node with the active copy,
             capture the replica's CAS, then add the node back.
          4. Assert the new active copy's CAS matches the replica CAS.
        """
        # fixed log message: previously misnamed the test as
        # test_rebalance_revid_conflict_resolution (copy-paste)
        self.log.info(' Starting test_failover_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        value = 'value'
        client.memcached(key).set(key, 0, 0, json.dumps({'value': value}))
        vbucket_id = client._get_vBucket_id(key)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket(vbucket_id)
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq = 121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        # use the local client (not self.client) so the vbucket lookup matches
        # the connection the key was written through
        pre_max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(key)) + ':max_cas'])
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        # setWithMeta(key, value, exp, flags, seqno, cas) — replaces the old
        # set_with_meta() call, consistent with the revised sibling version
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int(mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(key)) + ':max_cas'])
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # fail over the node holding the active copy and rebalance it out
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))
        rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])
        rebalance.result()
        time.sleep(120)

        # capture the CAS from the (promoted) replica copy
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
        rebalance.result()

        # verify the CAS is consistent between the replica and the new active copy
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
# Exemple #38
# 0
    def test_failover_revid_conflict_resolution(self):
        """Force rev-id conflict resolution, fail over the active node, and
        verify the key's CAS is preserved.

        Steps:
          1. Set a key and record its CAS, rev-seqno and vbucket max_cas.
          2. setWithMeta with a matching CAS but higher rev-seqno, forcing
             rev-id conflict resolution (seqno should rise, max_cas stay).
          3. Fail over / rebalance out the active node, capture replica CAS.
          4. Add the node back and assert active CAS == replica CAS.
        """

        # NOTE(review): message misnames the test (copy-paste from the
        # rebalance variant)
        self.log.info(' Starting test_rebalance_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        value = 'value'
        client.memcached(key).set(key, 0, 0, json.dumps({'value':value}))
        vbucket_id = client._get_vBucket_id(key)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket( vbucket_id )
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        # rev-seqno to force, higher than anything produced by 20 mutations
        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        # NOTE(review): uses self.client from setUp rather than the local
        # client — confirm both map the key to the same vbucket
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_1)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        # setWithMeta(key, value, exp, flags, seqno, cas)
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_2)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])

        rebalance.result()
        # allow the rebalance/failover to settle before reading the replica
        time.sleep(120)
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]
        #print 'active cas {0}'.format(active_CAS)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
# Exemple #39
# 0
    def test_restart_revid_conflict_resolution(self):
        """Attempt a rev-id conflict-resolution set_with_meta, restart all
        nodes, and verify the key's CAS survives the restart.

        The set_with_meta call with a matching CAS is expected to fail, so
        the CAS read before the restart must equal the CAS read after it.
        """
        self.log.info(' Starting test_restart_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        k = 0
        key = "{0}{1}".format(self.prefix, k)

        # set a key
        value = 'value0'
        client.memcached(key).set(key, 0, 0, json.dumps({'value': value}))
        vbucket_id = client._get_vBucket_id(key)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket(vbucket_id)
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq = 121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(key)) + ':max_cas'])
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        try:
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789', vbucket_id)
        except mc_bin_client.MemcachedError as e:
            # this is expected: the matching CAS causes the mutation to be rejected
            pass
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int(mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(key)) + ':max_cas'])
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        # Restart Nodes
        self._restart_server(self.servers[:])

        # verify the CAS survived the restart
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        cas_restart = mc_active.getMeta(key)[4]
        get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)

        # BUG FIX: the original asserted pre_cas == cas_post (both read
        # BEFORE the restart) and never used cas_restart, so the restart was
        # never actually verified; compare against the post-restart CAS.
        self.assertTrue(pre_cas == cas_restart, 'cas mismatch pre-restart: {0} post-restart: {1}'.format(pre_cas, cas_restart))
# Exemple #40
# 0
class OpsChangeCasTests(BucketConfig):

    def setUp(self):
        """Initialise the fixture: read tunable parameters from the test
        input and open REST/memcached connections to the cluster."""
        super(OpsChangeCasTests, self).setUp()

        # key prefix used by the load helpers
        self.prefix = "test_"

        # tunables, overridable via test input parameters
        param = self.input.param
        self.expire_time = param("expire_time", 35)
        self.item_flag = param("item_flag", 0)
        self.value_size = param("value_size", 256)
        self.items = param("items", 20)

        # cluster connections
        self.rest = RestConnection(self.master)
        self.client = VBucketAwareMemcached(self.rest, self.bucket)

    def tearDown(self):
        """Delegate cleanup to the BucketConfig base class."""
        super(OpsChangeCasTests, self).tearDown()

    def test_meta_rebalance_out(self):
        """Rebalance out the master node and verify a key's CAS is preserved.

        Mutates one key 10 times, checks the item CAS matches the vbucket
        max_cas, rebalances the active node out, captures the replica CAS,
        adds the node back, and asserts active CAS == replica CAS.
        """
        KEY_NAME = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket(vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        # NOTE(review): misleading failure message -- this checks that the
        # item CAS equals the vbucket max_cas, not that it is 0
        self.assertTrue(cas_active == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_active))

        # remove that node
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [], [self.master])

        rebalance.result()
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        get_meta_resp = mc_replica.getMeta(KEY_NAME, request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        # not supported in 4.6 self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_failover(self):
        """Hard-fail over the master node and verify a key's CAS is preserved.

        Mutates one key 10 times, checks item CAS == vbucket max_cas, fails
        over the active node and rebalances it out, captures the replica CAS,
        adds the node back, and asserts active CAS == replica CAS plus the
        extended-meta conflict-resolution flag.
        """
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        # NOTE(review): misleading failure message -- checks cas == max_cas, not 0
        self.assertTrue(cas_active == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_active))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])

        rebalance.result()
        # allow the failover/rebalance to settle before reading the replica
        time.sleep(60)

        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        #print 'replica CAS {0}'.format(replica_CAS)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        # NOTE(review): sibling tests mark this extended-meta check as "not
        # supported in 4.6" and comment it out -- confirm it is valid here
        self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_soft_restart(self):
        """Soft-restart all nodes and verify a key's CAS is preserved.

        Mutates one key 10 times, checks item CAS == vbucket max_cas,
        restarts the server processes, and asserts the CAS is unchanged.
        """
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        # NOTE(review): misleading failure message -- checks cas == max_cas, not 0
        self.assertTrue(cas_pre == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_pre))

        # restart nodes
        self._restart_server(self.servers[:])

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_pre == cas_post, 'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post))
        # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_hard_restart(self):
        """Reboot all nodes and verify a key's CAS is preserved.

        Mutates one key 10 times, checks item CAS == vbucket max_cas,
        reboots the machines, and asserts the CAS is unchanged.
        """
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        # NOTE(review): misleading failure message -- checks cas == max_cas, not 0
        self.assertTrue(cas_pre == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_pre))

        # reboot nodes
        self._reboot_server()

        # give the cluster time to come back up after the reboot
        time.sleep(60)
        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_pre == cas_post, 'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post))
        # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Test Incremental sets on cas and max cas values for keys
    '''
    def test_cas_set(self):
        """Incremental sets: load 20 set mutations, wait, then verify CAS."""
        self.log.info(' Starting test-sets')
        self._load_ops(ops='set', mutations=20)
        # allow persistence/replication to settle before checking CAS
        time.sleep(60)
        self._check_cas(check_conflict_resolution=False)

    ''' Test Incremental updates on cas and max cas values for keys
    '''
    def test_cas_updates(self):
        """Incremental updates: sets then replaces, then verify CAS."""
        self.log.info(' Starting test-updates')
        self._load_ops(ops='set', mutations=20)
        #self._load_ops(ops='add')
        self._load_ops(ops='replace', mutations=20)
        #self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)

    ''' Test Incremental deletes on cas and max cas values for keys
    '''
    def test_cas_deletes(self):
        """Incremental deletes: sets, replaces, deletes, then verify CAS."""
        self.log.info(' Starting test-deletes')
        self._load_ops(ops='set', mutations=20)
        #self._load_ops(ops='add')
        self._load_ops(ops='replace', mutations=20)
        self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)

    ''' Test expiry on cas and max cas values for keys
    '''
    def test_cas_expiry(self):
        """Expiry: load sets then expiries, verify CAS and expiration."""
        self.log.info(' Starting test-expiry')
        self._load_ops(ops='set', mutations=20)
        #self._load_ops(ops='add')
        #self._load_ops(ops='replace',mutations=20)
        self._load_ops(ops='expiry')
        self._check_cas(check_conflict_resolution=False)
        self._check_expiry()

    ''' Test touch on cas and max cas values for keys
    '''
    def test_cas_touch(self):
        """Touch: load sets then touches, then verify CAS."""
        self.log.info(' Starting test-touch')
        self._load_ops(ops='set', mutations=20)
        #self._load_ops(ops='add')
        #self._load_ops(ops='replace',mutations=20)
        self._load_ops(ops='touch')
        self._check_cas(check_conflict_resolution=False)

    ''' Test getMeta on cas and max cas values for keys
    '''
    def test_cas_getMeta(self):
        """getMeta: verify CAS after each of set, replace and delete loads."""
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)
        #self._load_ops(ops='add')
        self._load_ops(ops='replace', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)



    def test_cas_setMeta_lower(self):
        """setWithMeta interleaved with normal sets: verify item CAS and
        vbucket max_cas stay in lock-step.

        For 10 keys: setWithMeta with CAS = current+1, check getMeta and
        cbstats agree; then a normal set, another setWithMeta with
        max_cas+1, and a final set — asserting after each step that the
        item CAS equals the vbucket max_cas.
        """

        self.log.info(' Starting test-getMeta')


        # set some kv
        self._load_ops(ops='set', mutations=1)
        #self._check_cas(check_conflict_resolution=False)

        k=0
        while k<10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            self.log.info('For key {0} the vbucket is {1}'.format( key, vbucket_id ))
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            #mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            TEST_CAS = k

            rc = mc_active.getMeta(key)
            # target a CAS one higher than the item's current CAS
            cas = rc[4] + 1

            self.log.info('Key {0} retrieved CAS is {1} and will set CAS to {2}'.format(key, rc[4], cas))
            rev_seqno = rc[3]



            # do a set meta based on the existing CAS
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, 123, cas)


            # check what get meta say
            rc = mc_active.getMeta(key)
            cas_post_meta = rc[4]
            self.log.info('Getmeta CAS is {0}'.format(cas_post_meta))
            self.assertTrue( cas_post_meta == cas, 'Meta expected {0} actual {1}'.format( cas, cas_post_meta))

            # and what stats says
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Max CAS for key {0} vbucket is {1}'.format( key, max_cas))
            # NOTE(review): the two messages below are misleading -- together
            # these asserts require cas_post_meta == max_cas == cas
            self.assertTrue(cas_post_meta >= max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))


            # do another mutation and compare
            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # and then mix in a set with meta
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, 225, max_cas+1)
            cas_post_meta = mc_active.getMeta(key)[4]


            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))


            # and one more mutation for good measure
            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

    def test_cas_setMeta_higher(self):
        """setWithMeta carrying a CAS far above the stored CAS.

        For each of the first 10 keys: send a setWithMeta with a very large
        fixed CAS (TEST_CAS) and verify getMeta and the vbucket's max_cas
        stat both jump to it.  Then check that plain sets and a further
        setWithMeta at max_cas+1 keep cas == max_cas in lockstep.
        """

        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        # exercise the first 10 keys
        while k<10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            #mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)
            get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
            #print 'cr {0}'.format(get_meta_1)
            #print '-'*100
            # incoming metadata: rev seqno 123 and a CAS chosen to be far above
            # anything the prior load could have produced
            TEST_SEQNO = 123
            TEST_CAS = 9966180844186042368

            # getMeta tuple: index 4 is CAS
            cas = mc_active.getMeta(key)[4]
            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, TEST_CAS, '123456789',vbucket_id,
             #   add_extended_meta_data=True, conflict_resolution_mode=1)
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, TEST_CAS)

            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
            #print 'cr2 {0}'.format(get_meta_2)

            # the vbucket high-water mark should now equal the CAS we injected
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not equal it is {0}'.format(cas_post_meta))
            self.assertTrue(max_cas > cas, '[ERROR]Max cas  is not higher than original cas {0}'.format(cas))

            # a plain set must keep cas and max_cas in lockstep
            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # mix in another setWithMeta, one above the current max
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, max_cas+1)

            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 125, TEST_CAS+1, '223456789',vbucket_id,
            #    add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_3 = mc_active.getMeta(key, request_extended_meta_data=False)
            #print 'cr3 {0}'.format(get_meta_3)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )

            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not lower it is higher than {0}'.format(cas_post_meta))
            #self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))

            # and one more plain mutation for good measure
            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

    ''' Test deleteMeta on cas and max cas values for keys
    '''
    def test_cas_deleteMeta(self):
        """Verify CAS/max_cas behaviour around del_with_meta.

        On one key: do a setWithMeta at cas+1 and confirm max_cas follows,
        do plain sets and re-confirm cas == max_cas, then issue a
        del_with_meta with a low CAS (test_cas) and verify the vbucket's
        max_cas stays above the delete's CAS.
        """

        self.log.info(' Starting test-deleteMeta')


        # load 20 kvs and check the CAS
        self._load_ops(ops='set', mutations=20)
        time.sleep(60)
        self._check_cas(check_conflict_resolution=False)

        k=0
        test_cas = 456

        # only the first key is exercised
        while k<1:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            test_cas = test_cas + 1


            # get the meta data (index 4 of the getMeta tuple is the CAS)
            cas = mc_active.getMeta(key)[4] + 1

            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, cas)



            cas_post_meta = mc_active.getMeta(key)[4]

            # verify the observed CAS is as set

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))


            # a plain set must keep cas and max_cas in lockstep
            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # what is test cas for? Commenting out for now
            """
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, test_cas)
            cas_post_meta = mc_active.getMeta(key)[4]

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta < max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))
            """

            # test the delete

            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))


            # delete with a CAS well below the current max; max_cas must not regress
            self.log.info('Doing delete with meta, using a lower CAS value')
            get_meta_pre = mc_active.getMeta(key)[4]
            del_with_meta_resp = mc_active.del_with_meta(key, 0, 0, TEST_SEQNO, test_cas, test_cas+1)
            get_meta_post = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas > test_cas+1, '[ERROR]Max cas {0} is not greater than delete cas {1}'.format(max_cas, test_cas))




    ''' Testing skipping conflict resolution, whereby the last write wins, and it does neither cas CR nor rev id CR
    '''
    def test_cas_skip_conflict_resolution(self):
        """Skip conflict resolution entirely (last write wins).

        Send a setWithMeta with a LOWER rev seqno and a lower CAS, passing
        options=3 to bypass both CAS and rev-id conflict resolution.  The
        forced mutation must be accepted: the stored rev seqno decreases
        while the vbucket max_cas stays at the original CAS.
        """
        self.log.info(' Starting test_cas_skip_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        # exercise the first 20 keys
        for idx in range(20):
            key = "{0}{1}".format(self.prefix, idx)

            vb = self.client._get_vBucket_id(key)
            active_conn = self.client.memcached(key)
            master_conn = self.client.memcached_for_vbucket(vb)
            replica_conn = self.client.memcached_for_replica_vbucket(vb)

            low_seq = 12  # deliberately below the seqno produced by the load above

            stored_cas = active_conn.getMeta(key)[4]
            seq_before = active_conn.getMeta(key)[3]
            meta_before = active_conn.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(meta_before))

            self.log.info('Forcing conflict_resolution to allow insertion of lower Seq Number')
            forced_cas = int(stored_cas) - 1
            # options=3 tells the server to skip conflict resolution
            resp = active_conn.setWithMeta(key, '123456789', 0, 0, low_seq, forced_cas, 3)

            cas_after = active_conn.getMeta(key)[4]
            meta_after = active_conn.getMeta(key)
            seq_after = active_conn.getMeta(key)[3]
            vb_max_cas = int(active_conn.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..')

            self.log.info('all meta data after set_meta_force {0}'.format(meta_after))

            # max_cas keeps the original (higher) CAS; the seqno went backwards
            self.assertTrue(vb_max_cas == stored_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(vb_max_cas, stored_cas))
            self.assertTrue(seq_before > seq_after, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(seq_before, seq_after))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        '''
    def test_revid_conflict_resolution(self):
        """Rev-id (seqno) based conflict resolution when CAS values tie.

        A set_with_meta whose CAS equals the stored CAS cannot be decided
        by CAS comparison, so the higher rev seqno must win.  The incoming
        mutation carries seqno 121 (above anything the load produced) and
        is expected to be accepted, leaving the vbucket max_cas unchanged.
        """
        self.log.info(' Starting test_cas_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        # exercise the first 20 keys
        for k in range(20):
            key = "{0}{1}".format(self.prefix, k)

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket(vbucket_id)
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            new_seq = 121  # higher than any rev seqno produced by the load above

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            meta_before = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(meta_before))

            self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
            # identical CAS forces the server to fall back to rev-id comparison
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, cas, '123456789', vbucket_id,
                                add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..')
            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            # incoming rev seqno was higher, so the stored seqno must have increased
            # (message fixed: the check is pre < post, not pre > post)
            self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not less than post rev id {1}'.format(pre_seq, post_seq))



    ''' Testing conflict resolution, where timeSync is enabled and cas is lower but higher revid, expect Higher Cas to Win
        '''
    def test_cas_conflict_resolution(self):
        """CAS-based conflict resolution: the higher CAS wins.

        Send a setWithMeta carrying a LOWER CAS (but a higher rev seqno)
        and verify the existing mutation is kept: the server may reject
        the write outright, and the vbucket max_cas must remain at the
        original CAS afterwards.
        """
        self.log.info(' Starting test_cas_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        # exercise the first 20 keys
        for idx in range(20):
            key = "{0}{1}".format(self.prefix, idx)

            vb = self.client._get_vBucket_id(key)
            active_conn = self.client.memcached(key)
            master_conn = self.client.memcached_for_vbucket(vb)
            replica_conn = self.client.memcached_for_replica_vbucket(vb)

            incoming_seq = 121

            stored_cas = active_conn.getMeta(key)[4]
            seq_before = active_conn.getMeta(key)[3]
            meta_before = active_conn.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(meta_before))

            incoming_cas = int(stored_cas) - 100
            self.log.info('Forcing lower rev-id to win with higher CAS value, instead of higher rev-id with Lower Cas ')
            try:
                resp = active_conn.setWithMeta(key, '123456789', 0, 0, incoming_seq, incoming_cas)
            except mc_bin_client.MemcachedError as err:
                # rejection is the expected outcome of CAS conflict resolution
                pass

            cas_after = active_conn.getMeta(key)[4]
            meta_after = active_conn.getMeta(key)
            seq_after = active_conn.getMeta(key)[3]
            vb_max_cas = int(active_conn.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            self.log.info('Expect CAS conflict_resolution to occur, and the first mutation to be the winner..')

            self.log.info('all meta data after set_meta_force {0}'.format(meta_after))
            self.assertTrue(vb_max_cas == stored_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(vb_max_cas, stored_cas))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a restart server'''
    def test_restart_revid_conflict_resolution(self):
        """Verify rev-id conflict resolution outcome survives a server restart.

        Force a rev-id (seqno) based resolution by sending a set_with_meta
        whose CAS matches the stored CAS, restart every node, and check the
        key's CAS after the restart is still the CAS observed beforehand.

        Fixes vs the previous version: removed a block of dead lookups whose
        results were immediately overwritten, and the final assertion now
        actually uses the post-restart CAS (it previously compared two
        pre-restart values while claiming to verify the restart).
        """
        self.log.info(' Starting test_restart_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        key = "{0}{1}".format(self.prefix, 0)

        # set a key
        value = 'value0'
        client.memcached(key).set(key, 0, 0, json.dumps({'value':value}))
        vbucket_id = client._get_vBucket_id(key)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket( vbucket_id )
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq = 121  # incoming rev seqno, higher than anything written so far

        # getMeta tuple: index 3 is rev seqno, index 4 is CAS
        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        meta_before = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(meta_before))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        try:
            # identical CAS forces rev-id comparison; the server may reject this
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789', vbucket_id)
        except mc_bin_client.MemcachedError as e:
            # this is expected
            pass
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        # Restart all nodes
        self._restart_server(self.servers[:])

        # re-read the key's CAS through a fresh client
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        cas_restart = mc_active.getMeta(key)[4]

        get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)

        # the CAS observed before the restart must be retained afterwards
        self.assertTrue(pre_cas == cas_restart, 'cas mismatch pre-restart: {0} post-restart: {1}'.format(pre_cas, cas_restart))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a rebalance server'''
    def test_rebalance_revid_conflict_resolution(self):
        """Verify rev-id conflict resolution outcome survives a rebalance.

        Force rev-id (seqno) based resolution by writing with a CAS that
        matches the stored CAS, rebalance the active node out (promoting
        the replica) and back in, and check the replica's CAS equals the
        CAS reported by the new active node.
        """
        self.log.info(' Starting test_rebalance_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        value = 'value'
        client.memcached(key).set(key, 0, 0, json.dumps({'value':value}))
        vbucket_id = client._get_vBucket_id(key)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket( vbucket_id )
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq = 121  # incoming rev seqno, higher than anything written so far

        # getMeta tuple: index 3 is rev seqno, index 4 is CAS
        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        meta_before = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(meta_before))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        # identical CAS forces rev-id comparison; the higher incoming seqno wins
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)

        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        # the mutation carried a higher rev seqno, so the stored seqno must have grown
        # (message fixed: the check is pre < post, not pre > post)
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not less than post rev id {1}'.format(pre_seq, post_seq))

        # rebalance the node with the active data out of the cluster
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [], [self.master])
        rebalance.result()
        time.sleep(120)
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
        rebalance.result()

        # the replica's CAS must match what the new active node reports
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]
        print('active cas {0}'.format(active_CAS))

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a failover server'''
    def test_failover_revid_conflict_resolution(self):
        """Verify rev-id conflict resolution outcome survives a failover.

        Force rev-id (seqno) based resolution by writing with a CAS that
        matches the stored CAS, fail over and remove the active node
        (promoting the replica), add it back, and check the replica's CAS
        equals the CAS reported by the new active node.
        """
        # fixed: the start message previously named the rebalance test
        self.log.info(' Starting test_failover_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        value = 'value'
        client.memcached(key).set(key, 0, 0, json.dumps({'value':value}))
        vbucket_id = client._get_vBucket_id(key)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket( vbucket_id )
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq = 121  # incoming rev seqno, higher than anything written so far

        # getMeta tuple: index 3 is rev seqno, index 4 is CAS
        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        meta_before = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(meta_before))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        # identical CAS forces rev-id comparison; the higher incoming seqno wins
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        # the mutation carried a higher rev seqno, so the stored seqno must have grown
        # (message fixed: the check is pre < post, not pre > post)
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not less than post rev id {1}'.format(pre_seq, post_seq))

        # fail over the node holding the active data, then rebalance it out
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])
        rebalance.result()
        time.sleep(120)
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
        rebalance.result()

        # the replica's CAS must match what the new active node reports
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Test getMeta on cas and max cas values for empty vbucket
    '''
    def test_cas_getMeta_empty_vBucket(self):
        """Check max_cas via vbucket-details after loading data.

        Loads keys, collects the vbucket ids they map to, and asserts the
        max_cas of a populated vbucket is non-zero.

        Fixes vs the previous version: the `i = 1111` branch that tried to
        nominate an empty vbucket id was dead code (its result was always
        overwritten by `vbucket_ids.pop()`) and has been removed, and the
        inverted assertion message ('Max cas is non-zero' on a `!= 0`
        check) has been corrected.
        """
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)

        # keys 1..10, matching the original loop's off-by-one start
        all_keys = ["{0}{1}".format(self.prefix, k) for k in range(1, 11)]

        vbucket_ids = self.client._get_vBucket_ids(all_keys)

        print('bucket_ids')
        for v in vbucket_ids:
            print(v)
        print('done')

        vb_non_existing = vbucket_ids.pop()
        print('nominated vb_nonexisting is {0}'.format(vb_non_existing))

        # temp connection to the mc just to read stats
        mc_active = self.client.memcached(all_keys[0])
        max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(all_keys[0])) + ':max_cas'])
        # keys were loaded into this vbucket, so its max_cas must be non-zero
        self.assertTrue(max_cas != 0, msg='[ERROR] Max cas is zero')


    ''' Test addMeta on cas and max cas values for keys
    '''
    def test_meta_backup(self):
        """Back up and restore the bucket, then verify CAS is preserved.

        Runs cbbackup/cbrestore through a single remote shell connection
        (the previous version opened two connections and closed neither),
        and re-checks cas == max_cas for every key afterwards.
        """
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)

        # Do the backup on the bucket
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            # one connection serves both the backup and the restore
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location,
                                          [bucket.name for bucket in self.buckets])
            print('Done with restore')
        finally:
            self.shell.disconnect()
            self._check_cas(check_conflict_resolution=False)

    ''' Common function to verify the expected values on cas
    '''
    def _check_cas(self, check_conflict_resolution=False, master=None, bucket=None, time_sync=None):
        """Verify every key's CAS equals its vbucket's max_cas stat.

        Args:
            check_conflict_resolution: when True, also inspect the extended
                metadata flag returned by getMeta against `time_sync`.
            master: optional server; when given, self.rest/self.client are
                rebuilt against it (using `bucket`).
            bucket: bucket used to rebuild the client when `master` is set.
            time_sync: 'enabledWithoutDrift' or 'disabled' — expected state
                of the conflict-resolution metadata flag.
        """
        self.log.info(' Verifying cas and max cas for the keys')
        if master:
            self.rest = RestConnection(master)
            self.client = VBucketAwareMemcached(self.rest, bucket)

        for k in range(self.items):
            key = "{0}{1}".format(self.prefix, k)
            mc_active = self.client.memcached(key)

            cas = mc_active.getMeta(key)[4]
            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            # the latest mutation's CAS must equal the vbucket high-water mark
            # (message fixed: it previously claimed "Max cas is not 0")
            self.assertTrue(cas == max_cas, '[ERROR]Max cas {0} does not match cas {1}'.format(max_cas, cas))

            if check_conflict_resolution:
                get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)
                if time_sync == 'enabledWithoutDrift':
                    self.assertTrue(get_meta_resp[5] == 1, msg='[ERROR] Metadata indicate conflict resolution is not set')
                elif time_sync == 'disabled':
                    self.assertTrue(get_meta_resp[5] == 0, msg='[ERROR] Metadata indicate conflict resolution is set')

    ''' Common function to add set delete etc operations on the bucket
    '''
    def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):
        """Apply `ops` (`set`/`add`/`replace`/`delete`/`expiry`/`touch`)
        `mutations` times to each of the self.items test keys.

        When `master`/`bucket` are supplied, self.rest/self.client are
        rebuilt first.  Unknown `ops` values are silently ignored.
        """
        if master:
            self.rest = RestConnection(master)
        if bucket:
            self.client = VBucketAwareMemcached(self.rest, bucket)

        payload = MemcachedClientHelper.create_value('*', self.value_size)

        for idx in range(self.items):
            key = "{0}{1}".format(self.prefix, idx)
            for _ in range(mutations):
                conn = self.client.memcached(key)
                if ops == 'set':
                    conn.set(key, 0, 0, payload)
                elif ops == 'add':
                    conn.add(key, 0, 0, payload)
                elif ops == 'replace':
                    conn.replace(key, 0, 0, payload)
                elif ops == 'delete':
                    conn.delete(key)
                elif ops == 'expiry':
                    # expiry op writes with a TTL instead of 0
                    conn.set(key, self.expire_time, 0, payload)
                elif ops == 'touch':
                    conn.touch(key, 10)

        self.log.info("Done with specified {0} ops".format(ops))

    '''Check if items are expired as expected'''
    def _check_expiry(self):
        """Wait past the expiry window, then verify expired items can no
        longer be mutated with their previous CAS (NOT_FOUND expected)."""
        # give the server time past the TTL to expire the items
        time.sleep(self.expire_time + 30)

        for idx in range(10):
            key = "{0}{1}".format(self.prefix, idx)
            conn = self.client.memcached(key)
            old_cas = conn.getMeta(key)[4]
            self.log.info("Try to mutate an expired item with its previous cas {0}".format(old_cas))
            try:
                meta = conn.getMeta(key)
                fetched = self.client.memcached(key).get(key)
                self.client.memcached(key).cas(key, 0, self.item_flag, old_cas, 'new')
                meta = conn.getMeta(key)

                raise Exception("The item should already be expired. We can't mutate it anymore")
            except MemcachedError as error:
                # the key is expired, so NOT_FOUND is the expected outcome
                if error.status == ERR_NOT_FOUND:
                    self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.msg))
                else:
                    raise Exception(error)
    def test_failover(self):
        """Verify observe_seqno responses after graceful and hard failovers.

        Scenario 1: set a key, gracefully fail over the node that owns it,
        and check the observe-seqno response against the pre-failover
        vbucket uuid/seqno.  Scenario 2: repeat with a hard failover.
        """

        self.log.info('\n\nStarting test_failover')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        # enable the mutation-seqno feature so set() responses carry
        # vbucket uuid / seqno information
        h = client.sendHellos( memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO );


        self.log.info('\n\nVerify responses are correct after graceful failover')

        # record the vbucket uuid and seqno returned by the mutation;
        # these are the baseline the post-failover observe is checked against
        op_data = self.extract_vbucket_uuid_and_seqno( client.set('failoverkey', 0, 0, 'failovervalue') )
        op_data['format_type'] = 'no_failover'



        # don't really need to do this so it is commented
        #pre_failover_results = self.observe_seqno_response_to_dict( client.observe_seqno('failoverkey', vbucket_uuid) )




        # which server did the key go to and gracefully fail that server

        self.log.info('\n\nstarting graceful failover scenario')
        server_with_key = client.memcached( 'failoverkey').host
        self.log.info('\n\nserver {0} has the key and it will be failed over'.format(server_with_key))


        # graceful failover requires all data to be persisted first
        RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)

        # now failover
        RestConnection(self.master).fail_over(otpNode = 'ns_1@' + server_with_key, graceful=True)

        if server_with_key in self.servers:
            self.servers.remove(server_with_key)



        self.log.info('server should be failed over now')

        time.sleep(5)
        # reinstantiate the client so we get the new view of the world
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        server_with_key = client.memcached( 'failoverkey').host
        self.log.info('\n\nkey is now on server {0}'.format(server_with_key))

        after_failover_results = self.observe_seqno_response_to_dict(
            client.observe_seqno('failoverkey', op_data['vbucket_uuid']) )


        # verify: no (hard) failover, everything else as before
        self.check_results( op_data, after_failover_results)
        self.log.info('Test complete')






        # now do a hard failover

        # which server did the key go to and gracefully fail that server

        self.log.info('\n\nstarting hard failover scenario')

        # re-send hello on the rebuilt connection so mutation-seqno is active
        client.sendHellos( memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO );
        op_data = self.extract_vbucket_uuid_and_seqno( client.set('hardfailoverkey', 0, 0, 'failovervalue') )
        op_data['format_type'] = 'hard_failover'


        server_with_key = client.memcached( 'hardfailoverkey').host
        self.log.info('\n\nserver {0} has the key and it will be hard failed over'.format(server_with_key))


        # now failover (hard: no persistence wait, graceful=False)
        RestConnection(self.master).fail_over(otpNode = 'ns_1@' + server_with_key, graceful=False)

        if server_with_key in self.servers:
            self.servers.remove(server_with_key)



        self.log.info('\n\nserver should be failed over now')

        time.sleep(5)
        # reinstantiate the client so we get the new view of the world
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        server_with_key = client.memcached( 'hardfailoverkey').host
        self.log.info('\n\nkey is now on server {0}'.format(server_with_key))

        after_failover_results = self.observe_seqno_response_to_dict(
            client.observe_seqno('hardfailoverkey',op_data['vbucket_uuid']) )

        self.check_results( op_data, after_failover_results)

        self.log.info('Test complete')
    def test_items_append(self):
        """Append data to a subset of loaded items until each crosses
        ``desired_item_size``, then verify memory stats, final item sizes,
        and (when ``kv_verify`` is set) the exact appended content.

        Bug fixed: the Phase2 content check routed the get() through the
        stale loop variable ``key`` (left over from the Phase1 loop)
        instead of the dict key ``k`` being verified.
        """
        self.desired_item_size = self.input.param("desired_item_size", 2048)
        self.append_size = self.input.param("append_size", 1024)
        self.fixed_append_size = self.input.param("fixed_append_size", True)
        self.append_ratio = self.input.param("append_ratio", 0.5)
        self._load_all_buckets(self.master, self.gen_create, "create", 0,
                               batch_size=10000, pause_secs=5, timeout_secs=100)

        for bucket in self.buckets:
            verify_dict = {}
            vkeys, dkeys = bucket.kvs[1].key_set()

            # select the first append_ratio fraction of the valid keys
            key_count = len(vkeys)
            app_ratio = self.append_ratio * key_count
            selected_keys = []
            i = 0
            for key in vkeys:
                i += 1
                if i >= app_ratio:
                    break
                selected_keys.append(key)

            awareness = VBucketAwareMemcached(RestConnection(self.master), bucket.name)
            if self.kv_verify:
                # remember the initial values so the appended content can be
                # verified byte-for-byte in Phase2
                for key in selected_keys:
                    value = awareness.memcached(key).get(key)[2]
                    verify_dict[key] = value

            self.log.info("Bucket: {0}".format(bucket.name))
            self.log.info("Appending to have items whose initial size was "
                            + "{0} to equal or cross a size of {1}".format(self.value_size, self.desired_item_size))
            self.log.info("Item-appending of {0} items starting ..".format(len(selected_keys)+1))

            # when fixed_append_size is False, append sizes grow as powers
            # of two starting at 2**3
            index = 3
            while self.value_size < self.desired_item_size:
                str_len = self.append_size
                if not self.fixed_append_size:
                    str_len = int(math.pow(2, index))

                for key in selected_keys:
                    random_string = self.random_str_generator(str_len)
                    awareness.memcached(key).append(key, random_string)

                    if self.kv_verify:
                        verify_dict[key] = verify_dict[key] + random_string

                self.value_size += str_len
                index += 1

            self.log.info("The appending of {0} items ended".format(len(selected_keys)+1))

            msg = "Bucket:{0}".format(bucket.name)
            self.log.info("VERIFICATION <" + msg + ">: Phase 0 - Check the gap between "
                      + "mem_used by the bucket and total_allocated_bytes")
            stats = StatsCommon()
            mem_used_stats = stats.get_stats(self.servers, bucket, 'memory', 'mem_used')
            total_allocated_bytes_stats = stats.get_stats(self.servers, bucket, 'memory', 'total_allocated_bytes')
            total_fragmentation_bytes_stats = stats.get_stats(self.servers, bucket, 'memory', 'total_fragmentation_bytes')

            for server in self.servers:
                self.log.info("In {0} bucket {1}, total_fragmentation_bytes + the total_allocated_bytes = {2}"
                              .format(server.ip, bucket.name, (int(total_fragmentation_bytes_stats[server]) + int(total_allocated_bytes_stats[server]))))
                self.log.info("In {0} bucket {1}, mem_used = {2}".format(server.ip, bucket.name, mem_used_stats[server]))
                self.log.info("In {0} bucket {1}, the difference between acutal memory used by memcached and mem_used is {2} times"
                              .format(server.ip, bucket.name, float(int(total_fragmentation_bytes_stats[server]) + int(total_allocated_bytes_stats[server]))/float(mem_used_stats[server])))

            self.log.info("VERIFICATION <" + msg + ">: Phase1 - Check if any of the "
                    + "selected keys have value less than the desired value size")
            for key in selected_keys:
                value = awareness.memcached(key).get(key)[2]
                if len(value) < self.desired_item_size:
                    self.fail("Failed to append enough to make value size surpass the "
                                + "size, for key {0}".format(key))

            if self.kv_verify:
                self.log.info("VERIFICATION <" + msg + ">: Phase2 - Check if the content "
                        + "after the appends match whats expected")
                for k in verify_dict:
                    # route the request by the key actually being read
                    # (was: the stale loop variable `key`)
                    if awareness.memcached(k).get(k)[2] != verify_dict[k]:
                        self.fail("Content at key {0}: not what's expected.".format(k))
                self.log.info("VERIFICATION <" + msg + ">: Successful")
Exemple #43
0
    def _run_observe(self):
        """Mutate keys, measure observe/persistence latency, then validate via view.

        For each bucket: create the default view, mutate ``num_items`` keys
        using the configured ``mutate_by`` operation (retrying on memcached
        temp-failure status 134), poll ``observe`` until the key reports
        persisted while tracking the worst-case latency, then query the view
        to confirm all items are indexed.  When ``observe_with`` is set,
        additionally interleaves deletes (serial or parallel) and re-queries.
        """
        tasks = []
        query_set = "true"
        persisted = 0
        mutated = False
        count = 0
        for bucket in self.buckets:
            self.cluster.create_view(self.master, self.default_design_doc,
                                     self.default_view, bucket,
                                     self.wait_timeout * 2)
            client = VBucketAwareMemcached(RestConnection(self.master), bucket)
            self.max_time = timedelta(microseconds=0)
            if self.mutate_by == "multi_set":
                key_val = self._create_multi_set_batch()
                client.setMulti(0, 0, key_val)
            keys = ["observe%s" % (i) for i in range(self.num_items)]
            for key in keys:
                mutated = False
                # retry the mutation on temp-failure; `count` is shared across
                # all keys, capping total retries at 60 for the whole run
                while not mutated and count < 60:
                    try:
                        if self.mutate_by == "set":
                            # client.memcached(key).set(key, 0, 0, "set")
                            client.set(key, 0, 0, "setvalue")
                        elif self.mutate_by == "append":
                            client.memcached(key).append(key, "append")
                        elif self.mutate_by == "prepend":
                            client.memcached(key).prepend(key, "prepend")
                        elif self.mutate_by == "incr":
                            client.memcached(key).incr(key, 1)
                        elif self.mutate_by == "decr":
                            client.memcached(key).decr(key)
                        mutated = True
                        t_start = datetime.now()
                    except MemcachedError as error:
                        if error.status == 134:
                            loaded = False  # NOTE(review): never read; looks like leftover state
                            self.log.error(
                                "Memcached error 134, wait for 5 seconds and then try again"
                            )
                            count += 1
                            time.sleep(5)
                # busy-poll observe() until the key reports persisted
                # NOTE(review): `persisted` is never reset per key, so after the
                # first persisted key this loop is skipped for later keys — confirm intended
                while persisted == 0:
                    opaque, rep_time, persist_time, persisted, cas = client.observe(
                        key)
                t_end = datetime.now()
                #self.log.info("##########key:-%s################" % (key))
                #self.log.info("Persisted:- %s" % (persisted))
                #self.log.info("Persist_Time:- %s" % (rep_time))
                #self.log.info("Time2:- %s" % (t_end - t_start))
                # track the slowest mutate-to-persist round trip seen so far
                if self.max_time <= (t_end - t_start):
                    self.max_time = (t_end - t_start)
                    self.log.info("Max Time taken for observe is :- %s" %
                                  self.max_time)
                    self.log.info("Cas Value:- %s" % (cas))
            query = {
                "stale": "false",
                "full_set": "true",
                "connection_timeout": 600000
            }
            self.cluster.query_view(self.master,
                                    "dev_Doc1",
                                    self.default_view.name,
                                    query,
                                    self.num_items,
                                    bucket,
                                    timeout=self.wait_timeout)
            self.log.info(
                "Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s"
                % (self.default_view, bucket))
            # check whether observe has to run with delete and delete parallel with observe or not
            if len(self.observe_with) > 0:
                if self.observe_with == "delete":
                    self.log.info("Deleting 0- %s number of items" %
                                  (self.num_items // 2))
                    self._load_doc_data_all_buckets('delete', 0,
                                                    self.num_items // 2)
                    query_set = "true"
                elif self.observe_with == "delete_parallel":
                    self.log.info("Deleting Parallel 0- %s number of items" %
                                  (self.num_items // 2))
                    tasks = self._async_load_doc_data_all_buckets(
                        'delete', 0, self.num_items // 2)
                    query_set = "false"
                for key in keys:
                    opaque, rep_time, persist_time, persisted, cas = client.memcached(
                        key).observe(key)
                    self.log.info("##########key:-%s################" % (key))
                    self.log.info("Persisted:- %s" % (persisted))
                if self.observe_with == "delete_parallel":
                    # wait for the async deletes to finish before querying
                    for task in tasks:
                        task.result()

                query = {
                    "stale": "false",
                    "full_set": query_set,
                    "connection_timeout": 600000
                }
                self.cluster.query_view(self.master,
                                        "dev_Doc1",
                                        self.default_view.name,
                                        query,
                                        self.num_items // 2,
                                        bucket,
                                        timeout=self.wait_timeout)
                self.log.info(
                    "Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s"
                    % (self.default_view, self.default_bucket_name))
        """test_observe_basic_data_load_delete will test observer basic scenario
Exemple #44
0
class Upgrade_EpTests(UpgradeTests):
    """Upgrade tests verifying CAS/metadata consistency across a cluster upgrade.

    Fixes over the original: the setUp log format string had no ``{0}``
    placeholder (the server was never actually logged), Python-2-only
    ``print`` statements are replaced with ``self.log.info``, and the
    ``_check_cas`` assertion message now describes the comparison it makes.
    """

    def setUp(self):
        super(Upgrade_EpTests, self).setUp()
        self.log.info('master node: {0}'.format(self.master))
        self.rest = RestConnection(self.master)
        self.bucket = 'default'  # temp fix
        self.client = VBucketAwareMemcached(self.rest, self.bucket)
        self.time_synchronization = 'disabled'
        # fixed: original format string lacked a placeholder
        self.log.info('checking for self.servers {0}'.format(self.servers[1]))
        self.prefix = "test_"
        self.expire_time = 5
        self.item_flag = 0
        self.value_size = 256

    def tearDown(self):
        # super(Upgrade_EpTests, self).tearDown()
        self.testcase = '2'
        if "skip_cleanup" not in TestInputSingleton.input.test_params:
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self.testcase)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                self.servers, self.testcase)

    def test_upgrade(self):
        """Load items pre-upgrade, run the upgrade, verify cas/meta afterwards."""
        self.log.info('Starting upgrade tests...')

        self.log.info('Inserting few items pre upgrade')
        self._load_ops(ops='set', mutations=20, master=self.master, bucket=self.bucket)
        self.log.info('Upgrading ..')
        try:
            UpgradeTests.test_upgrade(self)
        finally:
            self.log.info(' Done with Upgrade ')

        self.log.info('Testing the meta details on items post upgrade')
        # self._check_config()
        self._check_cas(check_conflict_resolution=True, master=self.servers[1], bucket=self.bucket)

    def _check_config(self):
        """Assert the bucket's timeSynchronization setting matches the expected value."""
        result = self.rest.get_bucket_json(self.bucket)["timeSynchronization"]
        self.log.info('timeSynchronization setting: {0}'.format(result))
        self.assertEqual(result, self.time_synchronization, msg='ERROR, Mismatch on expected time synchronization values')
        self.log.info("Verified results")

    def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):
        """Apply `mutations` rounds of the given op to the 10 fixed test keys.

        ops: one of 'set', 'add', 'replace', 'delete', 'expiry', 'touch'.
        master/bucket: when given, rebuild the REST connection / client first.
        """
        if master:
            self.rest = RestConnection(master)
        if bucket:
            self.client = VBucketAwareMemcached(self.rest, bucket)

        payload = MemcachedClientHelper.create_value('*', self.value_size)

        for k in range(10):
            key = "{0}{1}".format(self.prefix, k)
            for i in range(mutations):
                if ops == 'set':
                    self.client.memcached(key).set(key, 0, 0, payload)
                elif ops == 'add':
                    self.client.memcached(key).add(key, 0, 0, payload)
                elif ops == 'replace':
                    self.client.memcached(key).replace(key, 0, 0, payload)
                elif ops == 'delete':
                    self.client.memcached(key).delete(key)
                elif ops == 'expiry':
                    # set with a TTL so the item expires
                    self.client.memcached(key).set(key, self.expire_time, 0, payload)
                elif ops == 'touch':
                    self.client.memcached(key).touch(key, 10)

        self.log.info("Done with specified {0} ops".format(ops))

    ''' Common function to verify the expected values on cas
    '''
    def _check_cas(self, check_conflict_resolution=False, master=None, bucket=None, time_sync=None):
        """Verify each key's CAS equals its vbucket's max_cas and, optionally,
        that extended metadata reflects the conflict-resolution mode."""
        self.log.info(' Verifying cas and max cas for the keys')
        if master:
            self.rest = RestConnection(master)
            self.client = VBucketAwareMemcached(self.rest, bucket)

        for k in range(10):
            key = "{0}{1}".format(self.prefix, k)
            mc_active = self.client.memcached(key)

            cas = mc_active.getMeta(key)[4]
            max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            # fixed: message now reports the actual comparison (cas vs max_cas)
            self.assertTrue(cas == max_cas, '[ERROR] cas {0} does not match max_cas {1}'.format(cas, max_cas))

            if check_conflict_resolution:
                get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=True)
                # extended-meta flag [5]: 1 = LWW conflict resolution enabled
                if time_sync == 'enabledWithoutDrift':
                    self.assertTrue(get_meta_resp[5] == 1, msg='[ERROR] Metadata indicate conflict resolution is not set')
                elif time_sync == 'disabled':
                    self.assertTrue(get_meta_resp[5] == 0, msg='[ERROR] Metadata indicate conflict resolution is set')
Exemple #45
0
    def test_restart_revid_conflict_resolution(self):
        """Force rev-id (seqno) conflict resolution by inserting with a
        matching CAS, restart all nodes, and verify the CAS survives.

        Bugs fixed: the final assertion compared two pre-restart values
        (pre_cas vs cas_post) so the restart was never actually verified —
        it now checks the CAS re-read after the restart; a dead duplicate
        setup block (immediately shadowed) was removed; the local `all`
        no longer shadows the builtin.
        """

        self.log.info(' Starting test_restart_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        key = "{0}{1}".format(self.prefix, 0)

        # set a key
        value = 'value0'
        client.memcached(key).set(key, 0, 0, json.dumps({'value': value}))
        vbucket_id = client._get_vBucket_id(key)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket(vbucket_id)
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq = 121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
        meta_before = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(meta_before))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        # supplying the existing CAS forces the server to fall back to
        # rev-id (sequence number) conflict resolution
        set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789', vbucket_id)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # Restart Nodes
        self._restart_server(self.servers[:])

        # verify the CAS assigned before the restart survived it
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        cas_restart = mc_active.getMeta(key)[4]
        get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)

        self.assertTrue(pre_cas == cas_restart, 'cas mismatch pre-restart: {0} post-restart: {1}'.format(pre_cas, cas_restart))
Exemple #46
0
    def test_items_append(self):
        """Append data to a subset of loaded items until each crosses
        ``desired_item_size``, then verify memory stats, final item sizes,
        and (when ``kv_verify`` is set) the exact appended content.

        Bug fixed: verification previously ran in a second loop over
        ``self.buckets`` but used ``awareness``/``selected_keys``/
        ``verify_dict`` leaked from the LAST iteration of the append loop,
        so with more than one bucket every bucket was "verified" against
        the last bucket's state.  Verification now runs per bucket inside
        the same loop.
        """
        self.desired_item_size = self.input.param("desired_item_size", 2048)
        self.append_size = self.input.param("append_size", 1024)
        self.fixed_append_size = self.input.param("fixed_append_size", True)
        self.append_ratio = self.input.param("append_ratio", 0.5)
        self._load_all_buckets(self.master,
                               self.gen_create,
                               "create",
                               0,
                               batch_size=1000,
                               pause_secs=5,
                               timeout_secs=100)

        for bucket in self.buckets:
            # reset per bucket: the append loop below grows value_size
            self.value_size = self.input.param("value_size", 512)
            verify_dict = {}
            vkeys, dkeys = bucket.kvs[1].key_set()

            # select the first append_ratio fraction of the valid keys
            key_count = len(vkeys)
            app_ratio = self.append_ratio * key_count
            selected_keys = []
            i = 0
            for key in vkeys:
                i += 1
                if i >= app_ratio:
                    break
                selected_keys.append(key)

            awareness = VBucketAwareMemcached(RestConnection(self.master),
                                              bucket.name)
            if self.kv_verify:
                # remember the initial values so appended content can be
                # verified byte-for-byte in Phase2
                for key in selected_keys:
                    value = awareness.memcached(key).get(key)[2]
                    verify_dict[key] = value

            self.log.info("Bucket: {0}".format(bucket.name))
            self.log.info("Appending to have items whose initial size was " +
                          "{0} to equal or cross a size of {1}".format(
                              self.value_size, self.desired_item_size))
            self.log.info("Item-appending of {0} items starting ..".format(
                len(selected_keys) + 1))

            # when fixed_append_size is False, append sizes grow as powers
            # of two starting at 2**3
            index = 3
            while self.value_size < self.desired_item_size:
                str_len = self.append_size
                if not self.fixed_append_size:
                    str_len = int(math.pow(2, index))

                for key in selected_keys:
                    random_string = self.random_str_generator(str_len)
                    awareness.memcached(key).append(key, random_string)
                    if self.kv_verify:
                        verify_dict[key] = verify_dict[key] + random_string
                self.log.info(
                    "for {0} items size was increased to {1} Bytes".format(
                        len(selected_keys) + 1, self.value_size))
                self.value_size += str_len
                index += 1

            self.log.info("The appending of {0} items ended".format(
                len(selected_keys) + 1))

            # verification now happens inside the per-bucket loop so that
            # awareness/selected_keys/verify_dict belong to THIS bucket
            msg = "Bucket:{0}".format(bucket.name)
            self.log.info("VERIFICATION <" + msg +
                          ">: Phase 0 - Check the gap between " +
                          "mem_used by the bucket and total_allocated_bytes")
            stats = StatsCommon()
            mem_used_stats = stats.get_stats(self.servers, bucket, 'memory',
                                             'mem_used')
            total_allocated_bytes_stats = stats.get_stats(
                self.servers, bucket, 'memory', 'total_allocated_bytes')
            total_fragmentation_bytes_stats = stats.get_stats(
                self.servers, bucket, 'memory', 'total_fragmentation_bytes')

            for server in self.servers:
                self.log.info(
                    "In {0} bucket {1}, total_fragmentation_bytes + the total_allocated_bytes = {2}"
                    .format(server.ip, bucket.name,
                            (int(total_fragmentation_bytes_stats[server]) +
                             int(total_allocated_bytes_stats[server]))))
                self.log.info("In {0} bucket {1}, mem_used = {2}".format(
                    server.ip, bucket.name, mem_used_stats[server]))
                self.log.info(
                    "In {0} bucket {1}, the difference between actual memory used by memcached and mem_used is {2} times"
                    .format(
                        server.ip, bucket.name,
                        float(
                            int(total_fragmentation_bytes_stats[server]) +
                            int(total_allocated_bytes_stats[server])) /
                        float(mem_used_stats[server])))

            self.log.info(
                "VERIFICATION <" + msg + ">: Phase1 - Check if any of the " +
                "selected keys have value less than the desired value size")
            for key in selected_keys:
                value = awareness.memcached(key).get(key)[2]
                if len(value) < self.desired_item_size:
                    self.fail(
                        "Failed to append enough to make value size surpass the "
                        + "size {0}, key {1} has size {2}".format(
                            self.desired_item_size, key, len(value)))

            if self.kv_verify:
                self.log.info("VERIFICATION <" + msg +
                              ">: Phase2 - Check if the content " +
                              "after the appends match what's expected")
                for k in verify_dict:
                    if awareness.memcached(k).get(k)[2] != verify_dict[k]:
                        self.fail(
                            "Content at key {0}: not what's expected.".format(
                                k))
                self.log.info("VERIFICATION <" + msg + ">: Successful")

        shell = RemoteMachineShellConnection(self.master)
        shell.execute_cbstats("", "raw", keyname="allocator", vbid="")
        shell.disconnect()
Exemple #47
0
class OpsChangeCasTests(BucketConfig):

    def setUp(self):
        """Prepare the test fixture: pull tunables from the test input
        and build REST / vbucket-aware memcached clients for the bucket."""
        super(OpsChangeCasTests, self).setUp()
        # Key prefix used when generating document ids for load helpers.
        self.prefix = "test_"
        # Tunable parameters, each read from the test input with a default.
        for attr, default in (("expire_time", 35),
                              ("item_flag", 0),
                              ("value_size", 256),
                              ("items", 20)):
            setattr(self, attr, self.input.param(attr, default))
        self.rest = RestConnection(self.master)
        self.client = VBucketAwareMemcached(self.rest, self.bucket)

    def tearDown(self):
        # No extra cleanup needed beyond the base BucketConfig teardown.
        super(OpsChangeCasTests, self).tearDown()

    def test_meta_rebalance_out(self):
        """Rebalance the node holding the active copy out and back in;
        afterwards the new active copy's CAS must match the replica's CAS."""
        KEY_NAME = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set the key repeatedly so its CAS advances with each mutation
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket(vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            # getMeta()[4] is the document CAS, as used throughout this suite
            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        # vbucket max_cas should equal the CAS of the last mutation
        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        # NOTE(review): message text ("is not 0") does not match the check,
        # which asserts cas_active == max_cas
        self.assertTrue(cas_active == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_active))

        # remove that node
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [] ,[self.master])

        rebalance.result()
        # capture the replica's view of the key after the active node is gone
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        get_meta_resp = mc_replica.getMeta(KEY_NAME,request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        # not supported in 4.6 self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_failover(self):
        """Fail over and remove the node with the active copy, add it back,
        and verify the key's CAS on the new active copy matches the replica."""
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set the key repeatedly so its CAS advances with each mutation
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            # getMeta()[4] is the document CAS, as used throughout this suite
            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        # vbucket max_cas should equal the CAS of the last mutation
        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        # NOTE(review): message text ("is not 0") does not match the check,
        # which asserts cas_active == max_cas
        self.assertTrue(cas_active == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_active))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [] ,[self.master])

        rebalance.result()
        # give the cluster time to settle after the rebalance
        time.sleep(60)

        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        #print 'replica CAS {0}'.format(replica_CAS)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_soft_restart(self):
        """Soft-restart the server processes and verify the key's CAS is
        unchanged across the restart."""
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set the key repeatedly so its CAS advances with each mutation
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            # getMeta()[4] is the document CAS, as used throughout this suite
            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        # vbucket max_cas should equal the CAS of the last mutation
        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        # NOTE(review): message text ("is not 0") does not match the check,
        # which asserts cas_pre == max_cas
        self.assertTrue(cas_pre == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_pre))

        # restart nodes
        self._restart_server(self.servers[:])

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_pre == cas_post, 'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post))
        # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_hard_restart(self):
        """Reboot the server machines and verify the key's CAS is unchanged
        after the machines come back."""
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set the key repeatedly so its CAS advances with each mutation
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            # getMeta()[4] is the document CAS, as used throughout this suite
            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        # vbucket max_cas should equal the CAS of the last mutation
        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        # NOTE(review): message text ("is not 0") does not match the check,
        # which asserts cas_pre == max_cas
        self.assertTrue(cas_pre == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas_pre))

        # reboot nodes
        self._reboot_server()

        # wait for the machines and services to come back up
        time.sleep(60)
        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_pre == cas_post, 'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post))
        # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_cas_set(self):
        """Incremental sets: mutate every key 20 times, then verify each
        key's CAS against the vbucket max_cas."""
        self.log.info(' Starting test-sets')
        self._load_ops(ops='set', mutations=20)
        # let cluster stats settle before reading CAS values
        time.sleep(60)
        self._check_cas(check_conflict_resolution=False)

    def test_cas_updates(self):
        """Incremental updates: run sets followed by replaces on every key,
        then verify each key's CAS against the vbucket max_cas."""
        self.log.info(' Starting test-updates')
        for op in ('set', 'replace'):
            self._load_ops(ops=op, mutations=20)
        self._check_cas(check_conflict_resolution=False)

    def test_cas_deletes(self):
        """Incremental deletes: set and replace every key, delete them,
        then verify each key's CAS against the vbucket max_cas."""
        self.log.info(' Starting test-deletes')
        for op, count in (('set', 20), ('replace', 20)):
            self._load_ops(ops=op, mutations=count)
        self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)

    def test_cas_expiry(self):
        """Expiry: set every key, expire them, then verify CAS values and
        confirm the documents actually expired."""
        self.log.info(' Starting test-expiry')
        self._load_ops(ops='set', mutations=20)
        self._load_ops(ops='expiry')
        self._check_cas(check_conflict_resolution=False)
        self._check_expiry()

    def test_cas_touch(self):
        """Touch: set every key, touch them, then verify each key's CAS
        against the vbucket max_cas."""
        self.log.info(' Starting test-touch')
        self._load_ops(ops='set', mutations=20)
        self._load_ops(ops='touch')
        self._check_cas(check_conflict_resolution=False)

    def test_cas_getMeta(self):
        """getMeta: verify CAS against vbucket max_cas after sets, after
        replaces, and finally after deletes."""
        self.log.info(' Starting test-getMeta')
        # each (load, check) pair validates CAS after one mutation kind
        for op in ('set', 'replace'):
            self._load_ops(ops=op, mutations=20)
            self._check_cas(check_conflict_resolution=False)
        self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)

    def test_cas_setMeta_lower(self):
        """set_with_meta with a CAS equal to (then one above) the current CAS:
        check how the vbucket max_cas tracks the incoming metadata, and that
        a plain set afterwards realigns CAS with max_cas."""
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0
        # Select arbit key
        while k<10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            # NOTE(review): TEST_CAS is assigned but never used in this test
            TEST_CAS = k

            # getMeta()[4] is the document CAS, as used throughout this suite
            cas = mc_active.getMeta(key)[4]
            # send metadata whose CAS equals the current CAS
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, cas, '123456789',vbucket_id,
                                                    add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr2 {0}'.format(get_meta_2)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            #print max_cas
            self.assertTrue(cas_post_meta >= max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))

            # a plain set should bring CAS and max_cas back in sync
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # now send metadata whose CAS is one above the current CAS
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 225, cas+1, '223456789',vbucket_id,
                                                add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_3 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr3 {0}'.format(get_meta_3)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            #self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))

            # and again a plain set realigns CAS with max_cas
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

    def test_cas_setMeta_higher(self):
        """set_with_meta with a CAS far above the current CAS: the vbucket
        max_cas must follow the higher incoming CAS, and a plain set
        afterwards must realign CAS with max_cas."""
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        while k<10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)
            get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr {0}'.format(get_meta_1)
            #print '-'*100
            TEST_SEQNO = 123
            # a CAS value far above anything the server would have generated
            TEST_CAS = 9966180844186042368

            # getMeta()[4] is the document CAS, as used throughout this suite
            cas = mc_active.getMeta(key)[4]
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, TEST_CAS, '123456789',vbucket_id,
                add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr2 {0}'.format(get_meta_2)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not equal it is {0}'.format(cas_post_meta))
            self.assertTrue(max_cas > cas, '[ERROR]Max cas  is not higher than original cas {0}'.format(cas))

            # a plain set should bring CAS and max_cas back in sync
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # repeat with a CAS one above the previous test value
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 125, TEST_CAS+1, '223456789',vbucket_id,
                add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_3 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr3 {0}'.format(get_meta_3)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )

            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not lower it is higher than {0}'.format(cas_post_meta))
            #self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))

            # and again a plain set realigns CAS with max_cas
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

    ''' Test deleteMeta on cas and max cas values for keys
    '''
    def test_cas_deleteMeta(self):
        """Exercise set_with_meta / del_with_meta and verify the vbucket
        max_cas stays consistent with per-key CAS values throughout."""
        self.log.info(' Starting test-deleteMeta')


        # load 20 kvs and check the CAS
        self._load_ops(ops='set', mutations=20)
        time.sleep(60)
        self._check_cas(check_conflict_resolution=False)

        k=0
        test_cas = 456

        while k<1:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            test_cas = test_cas + 1


            # get the meta data; getMeta()[4] is the document CAS
            cas = mc_active.getMeta(key)[4]

            # metadata CAS (457) is far below the current CAS
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, test_cas, '123456789',vbucket_id)


            cas_post_meta = mc_active.getMeta(key)[4]

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta < max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            print 'cas post {0}'.format(cas_post_meta)


            # a plain set should bring CAS and max_cas back in sync
            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # repeat the low-CAS set_with_meta with a different seqno
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 125, test_cas, '123456789',vbucket_id)
            cas_post_meta = mc_active.getMeta(key)[4]

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta < max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))

            mc_active.set(key, 0, 0,json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            self.log.info('Doing delete with meta, using a lower CAS value')
            get_meta_pre = mc_active.getMeta(key)[4]
            del_with_meta_resp = mc_active.del_with_meta(key, 0, 0, TEST_SEQNO, test_cas, test_cas+1)
            get_meta_post = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            # max_cas must not regress to the low CAS carried by the delete
            self.assertTrue(max_cas > test_cas+1, '[ERROR]Max cas {0} is not greater than delete cas {1}'.format(max_cas, test_cas))
    ''' Testing skipping conflict resolution, whereby the last write wins, and it does neither cas CR nor rev id CR
    '''
    def test_cas_skip_conflict_resolution(self):
        """Forced set_with_meta with lower seqno AND lower CAS: the incoming
        mutation is applied (last write wins) without CAS or rev-id CR."""
        self.log.info(' Starting test_cas_skip_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            # seqno lower than anything 20 mutations would have produced
            low_seq=12

            # getMeta()[4] is the CAS, getMeta()[3] the rev id / seqno
            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            self.log.info('Forcing conflict_resolution to allow insertion of lower Seq Number')
            lower_cas = int(cas)-1
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, low_seq, lower_cas, '123456789',vbucket_id)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..')

            #print 'cas meta data after set_meta_force {0}'.format(cas_post_meta)
            #print 'all meta data after set_meta_force {0}'.format(all_post_meta)
            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            # the lower seqno was accepted, so post_seq dropped below pre_seq
            self.assertTrue(pre_seq > post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        '''
    def test_revid_conflict_resolution(self):
        """set_with_meta with a matching CAS: CAS CR cannot decide, so rev-id
        CR applies and the higher incoming seqno wins."""
        self.log.info(' Starting test_cas_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            # seqno higher than anything 20 mutations would have produced
            new_seq=121

            # getMeta()[4] is the CAS, getMeta()[3] the rev id / seqno
            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, cas, '123456789',vbucket_id,
                                add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
            #print 'cr2 {0}'.format(get_meta_2)
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..')
            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            # the higher incoming seqno won, so post_seq rose above pre_seq
            self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing conflict resolution, where timeSync is enabled and cas is lower but higher revid, expect Higher Cas to Win
        '''
    def test_cas_conflict_resolution(self):
        """set_with_meta with a lower CAS but higher rev-id: CAS CR rejects
        the lower-CAS mutation, so the existing (higher-CAS) copy wins."""
        self.log.info(' Starting test_cas_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            # seqno higher than anything 20 mutations would have produced
            new_seq=121

            # getMeta()[4] is the CAS, getMeta()[3] the rev id / seqno
            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            lower_cas = int(cas)-100
            self.log.info('Forcing lower rev-id to win with higher CAS value, instead of higher rev-id with Lower Cas ')
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, lower_cas, '123456789',vbucket_id)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect CAS conflict_resolution to occur, and the first mutation to be the winner..')

            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            # NOTE(review): this asserts the seqno DID advance even though the
            # log above says the first mutation should win — confirm intent
            self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a restart server'''
    def test_restart_revid_conflict_resolution(self):
        """Force rev-id conflict resolution on a key (set_with_meta with a
        CAS matching the current one, so rev-id decides and the higher
        incoming seqno wins), restart all servers, and verify the key's CAS
        is retained across the restart.

        Fix: the final assertion previously compared pre_cas with cas_post,
        both captured BEFORE the restart, so a CAS change across the restart
        was never detected; it now checks the CAS re-read after the restart.
        Also removed the dead vbucket/client lookups that were immediately
        recomputed after the key was set.
        """
        self.log.info(' Starting test_restart_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        key = "{0}{1}".format(self.prefix, 0)

        # overwrite the key so its current value is known, then locate its vbucket
        value = 'value0'
        client.memcached(key).set(key, 0, 0, json.dumps({'value': value}))
        vbucket_id = client._get_vBucket_id(key)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket(vbucket_id)
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        # seqno higher than anything the loads above would have produced
        new_seq = 121

        # getMeta()[4] is the CAS, getMeta()[3] the rev id / seqno
        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(vbucket_id) + ':max_cas'])
        all = mc_active.getMeta(key)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        # matching CAS -> CAS CR cannot decide -> rev-id CR applies and the
        # higher incoming seqno wins
        set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789', vbucket_id)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        max_cas_post = int(mc_active.stats('vbucket-details')['vb_' + str(vbucket_id) + ':max_cas'])
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not lower than post rev id {1}'.format(pre_seq, post_seq))

        # Restart Nodes
        self._restart_server(self.servers[:])

        # verify the CAS survived the restart: re-read it on a fresh client
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        cas_restart = mc_active.getMeta(key)[4]
        get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)

        # compare against the value actually read AFTER the restart
        self.assertTrue(pre_cas == cas_restart, 'cas mismatch pre-restart: {0} post-restart: {1}'.format(pre_cas, cas_restart))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a rebalance server'''
    def test_rebalance_revid_conflict_resolution(self):
        """Force rev-id conflict resolution on a key, rebalance the active
        node out and back in, and verify the key's CAS matches the replica's
        view afterwards."""
        self.log.info(' Starting test_rebalance_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        value = 'value'
        client.memcached(key).set(key, 0, 0,json.dumps({'value':value}))
        vbucket_id = client._get_vBucket_id(key)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket( vbucket_id )
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        # seqno higher than anything the loads above would have produced
        new_seq=121

        # getMeta()[4] is the CAS, getMeta()[3] the rev id / seqno
        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_1)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        # matching CAS -> rev-id CR applies and the higher incoming seqno wins
        set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_2)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # remove that node
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [] ,[self.master])
        rebalance.result()
        # give the cluster time to settle after the rebalance
        time.sleep(120)
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key,request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]
        print 'active cas {0}'.format(active_CAS)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing rev-id based conflict resolution with timeSync enabled, where the cas values of the two mutations match
        so rev-id conflict resolution is used, and verifying the result is retained after a server failover'''
    def test_failover_revid_conflict_resolution(self):
        """Force rev-id conflict resolution, then fail over the active node.

        Flow: seed the key, issue a set_with_meta whose CAS equals the
        current CAS (forcing rev-id CR, where the mutation with the higher
        seqno wins), fail over and rebalance out the master, add it back,
        and verify the replica's CAS matches the healed active copy.
        """
        # BUG FIX: the original logged the name of a different test
        # (test_rebalance_revid_conflict_resolution).
        self.log.info(' Starting test_failover_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        value = 'value'
        client.memcached(key).set(key, 0, 0, json.dumps({'value': value}))
        vbucket_id = client._get_vBucket_id(key)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket(vbucket_id)
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq = 121

        # getMeta tuple layout: [3] == seqno (rev id), [4] == cas.
        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
        pre_meta = mc_active.getMeta(key)  # renamed from `all` (shadowed builtin)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        self.log.info('all meta data before set_meta_force {0}'.format(pre_meta))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789', vbucket_id)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        max_cas_post = int(mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

        # Fail over the node holding the active vbucket, then rebalance it out.
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])

        rebalance.result()
        time.sleep(120)
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)

        # Add the node back; the max_cas should be healed from the replica.
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # Verify the CAS on the healed active copy matches the replica.
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Test getMeta on cas and max cas values for empty vbucket
    '''
    def test_cas_getMeta_empty_vBucket(self):
        """Check max_cas via vbucket-details stats after loading data.

        NOTE(review): despite the name, the original never queried a truly
        empty vbucket -- the candidate-selection branch was dead code
        (1111 can never satisfy ``<= 1023``) and its result was then
        unconditionally overwritten by popping an *occupied* vbucket id.
        The dead code is removed here; the stat is still read for an
        occupied vbucket, so runtime behaviour is unchanged.
        TODO: decide whether an actually-empty vbucket should be queried
        (see the commented-out stats line below).
        """
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)

        # Keys <prefix>1 .. <prefix>10, same as the original while-loop.
        all_keys = ["{0}{1}".format(self.prefix, k) for k in range(1, 11)]

        vbucket_ids = self.client._get_vBucket_ids(all_keys)
        self.log.info('vbucket ids in use: {0}'.format(vbucket_ids))

        # Nominate one of the used vbucket ids (see NOTE above).
        vb_non_existing = vbucket_ids.pop()
        self.log.info('nominated vb_nonexisting is {0}'.format(vb_non_existing))

        # Temporary connection to memcached, used only for stats.
        mc_active = self.client.memcached(all_keys[0])
        #max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(vb_non_existing) + ':max_cas'] )
        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(all_keys[0])) + ':max_cas'] )
        # BUG FIX: the original failure message said "Max cas is non-zero",
        # the opposite of what the assertion actually checks.
        self.assertTrue(max_cas != 0, msg='[ERROR] Max cas is zero')


    ''' Test that cas and max cas values for keys are preserved across a bucket backup and restore
    '''
    def test_meta_backup(self):
        """Back up and restore the bucket, then verify cas == max_cas per key.

        Conflict-resolution metadata is deliberately not re-checked after
        the restore (check_conflict_resolution=False).
        """
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)

        # Back up all buckets from the master node.
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            # Reuse the same shell connection for the restore; the original
            # opened a second, redundant RemoteMachineShellConnection.
            self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location,
                                          [bucket.name for bucket in self.buckets])
            self.log.info('Done with restore')
        finally:
            # Always verify cas metadata, even if backup/restore failed.
            self._check_cas(check_conflict_resolution=False)

    ''' Common function to verify the expected values on cas
    '''
    def _check_cas(self, check_conflict_resolution=False, master=None, bucket=None, time_sync=None):
        """Verify that every key's CAS equals its vbucket's max_cas stat.

        Optionally re-points self.rest/self.client at ``master``/``bucket``
        first.  When check_conflict_resolution is True, also asserts the
        extended-metadata conflict-resolution flag matches ``time_sync``
        ('enabledWithoutDrift' -> 1, 'disabled' -> 0); other time_sync
        values skip that check.
        """
        self.log.info(' Verifying cas and max cas for the keys')
        if master:
            self.rest = RestConnection(master)
            self.client = VBucketAwareMemcached(self.rest, bucket)

        k=0

        while k < self.items:
            key = "{0}{1}".format(self.prefix, k)
            k += 1
            mc_active = self.client.memcached(key)

            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            # BUG FIX: the original failure message claimed "Max cas is not 0"
            # although the assertion compares cas with max_cas.
            self.assertTrue(cas == max_cas,
                            '[ERROR]Key {0}: cas {1} does not match max_cas {2}'.format(key, cas, max_cas))

            if check_conflict_resolution:
                get_meta_resp = mc_active.getMeta(key,request_extended_meta_data=False)
                if time_sync == 'enabledWithoutDrift':
                    self.assertTrue( get_meta_resp[5] == 1, msg='[ERROR] Metadata indicate conflict resolution is not set')
                elif time_sync == 'disabled':
                    self.assertTrue( get_meta_resp[5] == 0, msg='[ERROR] Metadata indicate conflict resolution is set')

    ''' Common function to add set delete etc operations on the bucket
    '''
    def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):
        """Apply ``ops`` ('set'/'add'/'replace'/'delete'/'expiry'/'touch')
        ``mutations`` times to each of the self.items test keys.

        A None or unrecognized ``ops`` performs no mutation, mirroring the
        original if/elif chain.  ``master``/``bucket`` optionally re-point
        the rest and memcached clients first.
        """
        if master:
            self.rest = RestConnection(master)
        if bucket:
            self.client = VBucketAwareMemcached(self.rest, bucket)

        payload = MemcachedClientHelper.create_value('*', self.value_size)

        for key_num in range(self.items):
            key = "{0}{1}".format(self.prefix, key_num)
            # One thunk per supported op; the table is rebuilt per key so
            # each lambda captures the current key.
            dispatch = {
                'set': lambda: self.client.memcached(key).set(key, 0, 0, payload),
                'add': lambda: self.client.memcached(key).add(key, 0, 0, payload),
                'replace': lambda: self.client.memcached(key).replace(key, 0, 0, payload),
                'delete': lambda: self.client.memcached(key).delete(key),
                'expiry': lambda: self.client.memcached(key).set(key, self.expire_time, 0, payload),
                'touch': lambda: self.client.memcached(key).touch(key, 10),
            }
            action = dispatch.get(ops)
            for _ in range(mutations):
                if action is not None:
                    action()

        self.log.info("Done with specified {0} ops".format(ops))

    '''Check if items are expired as expected'''
    def _check_expiry(self):
        """Wait past the expiry window, then confirm each key really expired.

        For each of the 10 test keys: sleep expire_time+30 seconds, then
        attempt a getMeta/get and a CAS mutation using the key's last known
        cas.  Every attempt must fail with ERR_NOT_FOUND; any other error
        (or a successful mutation) is treated as a test failure.
        """
        time.sleep(self.expire_time+30)

        k=0
        while k<10:
            key = "{0}{1}".format(self.prefix, k)
            k += 1
            mc_active = self.client.memcached(key)
            cas = mc_active.getMeta(key)[4]
            self.log.info("Try to mutate an expired item with its previous cas {0}".format(cas))
            try:
                all = mc_active.getMeta(key)
                a=self.client.memcached(key).get(key)
                self.client.memcached(key).cas(key, 0, self.item_flag, cas, 'new')
                all = mc_active.getMeta(key)

                # Reaching this line means the CAS mutation succeeded on an
                # item that should already have expired.
                raise Exception("The item should already be expired. We can't mutate it anymore")
            except MemcachedError as error:
            # Expected path: the key is expired, so memcached raises
            # MemcachedError with status ERR_NOT_FOUND.
                if error.status == ERR_NOT_FOUND:
                    self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.msg))
                    pass
                else:
                    raise Exception(error)
class SyncReplicationTest(unittest.TestCase):
    """Tests of memcached sync_replication behaviour on a rebuilt cluster.

    common_setup() tears the cluster down, re-adds all nodes, creates the
    "default" bucket with the requested replica count, and rebalances
    before each test; tearDown() removes buckets and cleans the cluster.
    """

    # VBucketAwareMemcached client; set by common_setup(), closed in tearDown().
    awareness = None

    def common_setup(self, replica):
        """Rebuild the cluster and create a "default" bucket with ``replica`` replicas."""
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        first = self._servers[0]
        self.log = logger.Logger().get_logger()
        self.log.info(self._input)
        rest = RestConnection(first)
        for server in self._servers:
            RestHelper(RestConnection(server)).is_ns_server_running()

        ClusterOperationHelper.cleanup_cluster(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.add_all_nodes_or_assert(
            self._servers[0], self._servers, self._input.membase_settings,
            self)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        info = rest.get_nodes_self()
        bucket_ram = info.mcdMemoryReserved * 3 / 4
        rest.create_bucket(bucket="default",
                           ramQuotaMB=int(bucket_ram),
                           replicaNumber=replica,
                           proxyPort=rest.get_nodes_self().moxi)
        msg = "wait_for_memcached fails"
        # BUG FIX: a stray trailing comma used to make `ready` a one-element
        # tuple, which is always truthy, so this assertion could never fail.
        ready = BucketOperationHelper.wait_for_memcached(first, "default")
        self.assertTrue(ready, msg)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(first.ip))
        self.log.info('started rebalance operation on master node {0}'.format(
            first.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        # without a bucket this seems to fail
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))
        self.awareness = VBucketAwareMemcached(rest, "default")

    def tearDown(self):
        """Close the client and clean up, but only if common_setup() got far
        enough to create one."""
        if self.awareness:
            self.awareness.done()
            ClusterOperationHelper.cleanup_cluster(self._servers)
            BucketOperationHelper.delete_all_buckets_or_assert(
                self._servers, self)

    def test_one_replica(self):
        """sync_replication to one replica should succeed for every key."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 1024)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.sync_replication([{"key": k, "vbucket": vBucket}], 1)
        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)

    def test_one_replica_one_node(self):
        pass

    def test_one_replica_multiple_nodes(self):
        pass

    def test_one_replica_bucket_replica_one(self):
        pass

    def test_two_replica(self):
        self._unsupported_replicas(2)

    def test_three_replica(self):
        # NOTE(review): passes 1, not 3 -- looks like a copy/paste slip;
        # confirm the intended replica count before changing it.
        self._unsupported_replicas(1)

    def _unsupported_replicas(self, replica):
        """sync_replication with an unsupported replica count must raise
        MemcachedError for every key."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 102400)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            mc.get(k)
            try:
                mc.sync_replication([{"key": k, "vbucket": vBucket}], replica)
                msg = "server did not raise an error when running sync_replication with {0} replicas"
                self.fail(msg.format(replica))
            except MemcachedError as error:
                self.log.info("error {0} {1} as expected".format(
                    error.status, error.msg))

        for k in keys:
            mc = self.awareness.memcached(k)
            mc.get(k)

    def test_invalid_key(self):
        pass

    def test_not_your_vbucket(self):
        """sync_replication sent to a node that does not own the vbucket must
        report 'invalid key' on every attempt."""
        self.common_setup(1)
        keys = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, 100)]
        value = MemcachedClientHelper.create_value("*", 1024)
        for k in keys:
            vBucket = crc32.crc32_hash(k)
            mc = self.awareness.memcached(k)
            mc.set(k, 0, 0, value)
            not_your_vbucket_mc = self.awareness.not_my_vbucket_memcached(k)
            try:
                count = 0
                expected_error = 0
                while count < 100:
                    a, b, response = not_your_vbucket_mc.sync_replication(
                        [{
                            "key": k,
                            "vbucket": vBucket
                        }], 1)
                    count += 1
                    self.log.info("response : {0}".format(response))
                    if response and response[0]["event"] != "invalid key":
                        expected_error += 1
                # BUG FIX: the original used `is not 100`, comparing object
                # identity rather than value (a SyntaxWarning on 3.8+).
                if expected_error != 100:
                    self.fail(
                        msg=
                        "server did not raise an error when running sync_replication with invalid vbucket"
                    )
            except MemcachedError as error:
                self.log.error(error)

    def test_some_invalid_keys(self):
        pass

    def stest_ome_not_your_vbucket(self):
        # NOTE(review): name looks like a typo of test_some_not_your_vbucket;
        # as written it is never picked up by test discovery.  Renaming would
        # change which tests run, so it is left unchanged.
        pass

    def test_some_large_values(self):
        pass

    def test_too_many_keys(self):
        pass

    def test_singlenode(self):
        pass
# --- Example #49 (scraper snippet separator; original read "Exemple #49" / "0") ---
# Standalone smoke script (scraped example): point at a local node on port
# 9000, round-trip a key through a vbucket-aware client, then run
# rebalance / item-count verification helpers against bucket-0..bucket-2.
# NOTE(review): relies on RestConnection / VBucketAwareMemcached /
# RebalanceHelper / MemcachedClientHelper / uuid being imported earlier in
# the scraped source; the credentials below are redacted placeholders
# introduced by the scrape.
from resourceparser import ServerInfo

sys.path.append("lib")
sys.path.append("pytests")

# Target node: localhost REST port 9000.
server = ServerInfo()
server.ip = "127.0.0.1"
server.rest_username = '******'
server.rest_password = '******'
server.port = 9000
rest = RestConnection(server)
nodes = rest.node_statuses()

# Round-trip a random key through the vbucket-aware client.
vm = VBucketAwareMemcached(rest,{"name":"bucket-0","password":""})
key = str(uuid.uuid4())
vm.memcached(key).set(key, 0, 0, "hi")
vm.memcached(key).get(key)

# Item-count / rebalance verification across the three buckets.
RebalanceHelper.print_taps_from_all_nodes(rest,bucket="bucket-0",password="")
RebalanceHelper.verify_items_count(server,"bucket-0")
RebalanceHelper.verify_items_count(server,"bucket-1")
RebalanceHelper.verify_items_count(server,"bucket-2")
RebalanceHelper.wait_till_total_numbers_match(server,"bucket-0",120,"")


# Same round-trip through a direct proxy memcached client.
cm = MemcachedClientHelper.proxy_client(server, "bucket-0", "")
key = str(uuid.uuid4())
cm.set(key, 0, 0, "hi")
cm.get(key)

    def test_new_response_fields(self):
        """Exercise the extended mutation-response fields (vbucket uuid, seqno).

        After HELO-ing PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO, each mutating
        operation's response is reduced to a {vbucket_uuid, seqno} dict and
        compared with the previous mutation's values via
        verify_vbucket_and_seqno (helper defined elsewhere in this class --
        presumably asserts same vbucket uuid and an advancing seqno; confirm
        against its definition).
        """

        self.log.info('\n\nStarting test_new_response_fields')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')

        # Enable mutation-seqno responses; the HELO return value is unused.
        h = client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)

        set_resp = self.extract_vbucket_uuid_and_seqno(
            client.set('test1key', 0, 0, '123456789'))

        # test the inplace operations
        # (this set's response is intentionally ignored; it only refreshes the value)
        test = client.generic_request(
            client.memcached('test1key').set, 'test1key', 0, 0,
            'totally new value')
        replace_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('test1key').replace, 'test1key', 0, 0,
                'totally new value'))
        self.verify_vbucket_and_seqno(set_resp, replace_resp, 'replace')

        append_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('test1key').append, 'test1key',
                'appended data'))
        self.verify_vbucket_and_seqno(replace_resp, append_resp, 'append')

        prepend_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('test1key').prepend, 'test1key',
                'prepended data'))
        self.verify_vbucket_and_seqno(append_resp, prepend_resp, 'prepend')

        # and finally do the delete
        delete_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('test1key').delete, 'test1key'))
        self.verify_vbucket_and_seqno(set_resp, delete_resp, 'delete')

        #meta commands under construction
        # test the 'meta' commands
        TEST_SEQNO = 123
        TEST_CAS = 456

        set_with_meta_resp = client.generic_request(
            client.memcached('test1keyformeta').set_with_meta,
            'test1keyformeta', 0, 0, TEST_SEQNO, TEST_CAS, '123456789')
        # Response extras: two big-endian uint64s -- vbucket uuid then seqno.
        set_meta_vbucket_uuid, set_meta_seqno = struct.unpack(
            '>QQ', set_with_meta_resp[2])
        set_with_meta_dict = {
            'vbucket_uuid': set_meta_vbucket_uuid,
            'seqno': set_meta_seqno
        }

        get_meta_resp = client.generic_request(
            client.memcached('test1keyformeta').getMeta, 'test1keyformeta')
        # getMeta tuple: [3] == seqno, [4] == cas.
        self.assertTrue(TEST_SEQNO == get_meta_resp[3], \
               msg='get meta seqno does not match as set. Expected {0}, actual {1}'.format(TEST_SEQNO, get_meta_resp[3]) )
        self.assertTrue(TEST_CAS == get_meta_resp[4], \
               msg='get meta cas does not match as set. Expected {0}, actual {1}'.format(TEST_CAS, get_meta_resp[4]) )

        #   def del_with_meta(self, key, exp, flags, seqno, old_cas, new_cas, vbucket= -1):
        del_with_meta_resp = client.generic_request(
            client.memcached('test1keyformeta').del_with_meta,
            'test1keyformeta', 0, 0, TEST_SEQNO, TEST_CAS, TEST_CAS + 1)
        vbucket_uuid, seqno = struct.unpack('>QQ', del_with_meta_resp[2])
        del_with_meta_dict = {'vbucket_uuid': vbucket_uuid, 'seqno': seqno}

        self.verify_vbucket_and_seqno(set_with_meta_dict, del_with_meta_dict,
                                      'set/del with meta')

        #  do some integer operations
        set_resp = self.extract_vbucket_uuid_and_seqno(
            client.set('key-for-integer-value', 0, 0, '123'))
        incr_resp = client.generic_request(
            client.memcached('key-for-integer-value').incr,
            'key-for-integer-value')
        # incr/decr responses carry uuid/seqno at indices [2]/[3].
        incr_resp_dict = {'vbucket_uuid': incr_resp[2], 'seqno': incr_resp[3]}
        self.verify_vbucket_and_seqno(set_resp, incr_resp_dict, 'incr')

        decr_resp = client.generic_request(
            client.memcached('key-for-integer-value').decr,
            'key-for-integer-value')
        decr_resp_dict = {'vbucket_uuid': decr_resp[2], 'seqno': decr_resp[3]}
        self.verify_vbucket_and_seqno(incr_resp_dict, decr_resp_dict, 'decr')

        add_resp = self.extract_vbucket_uuid_and_seqno(
            client.generic_request(
                client.memcached('totally new key').add, 'totally new key', 0,
                0, 'totally new value'))

        self.assertTrue(add_resp['vbucket_uuid'] > 0,
                        msg='Add request vbucket uuid is zero')

        self.log.info('\n\nComplete test_new_response_fields\n\n')
# --- Example #51 (scraper snippet separator; original read "Exemple #51" / "0") ---
class Upgrade_EpTests(UpgradeTests):
    def setUp(self):
        """Run the base upgrade setUp, then wire up the rest/memcached
        clients and the constants used by the cas verification helpers."""
        super(Upgrade_EpTests, self).setUp()
        print(self.master)
        self.rest = RestConnection(self.master)
        self.bucket = 'default'  # temp fix
        self.client = VBucketAwareMemcached(self.rest, self.bucket)
        self.time_synchronization = 'disabled'
        # BUG FIX: the original format string had no placeholder, so the
        # server argument was silently dropped from the output.
        print('checking for self.servers {0}'.format(self.servers[1]))
        self.prefix = "test_"
        self.expire_time = 5
        self.item_flag = 0
        self.value_size = 256

    def tearDown(self):
        """Delete buckets and tear down the cluster unless the skip_cleanup
        test param was supplied."""
        #super(Upgrade_EpTests, self).tearDown()
        self.testcase = '2'
        # Idiom fix: "x not in y" instead of "not x in y".
        if "skip_cleanup" not in TestInputSingleton.input.test_params:
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self.testcase)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                self.servers, self.testcase)

    def test_upgrade(self):
        """Load data, run the base-class upgrade, then verify cas metadata
        survived the upgrade.

        Note: check_conflict_resolution=True requests extended metadata, but
        time_sync is left at its default (None), so _check_cas performs no
        conflict-resolution-flag assertion here.
        """
        self.log.info('Starting upgrade tests...')

        #o = OpsChangeCasTests()

        self.log.info('Inserting few items pre upgrade')
        self._load_ops(ops='set',
                       mutations=20,
                       master=self.master,
                       bucket=self.bucket)
        self.log.info('Upgrading ..')
        try:
            UpgradeTests.test_upgrade(self)
        finally:
            self.log.info(' Done with Upgrade ')

        self.log.info('Testing the meta details on items post upgrade')
        #self._check_config()
        # Verify against servers[1], since the upgrade may have moved the
        # active data off the original master.
        self._check_cas(check_conflict_resolution=True,
                        master=self.servers[1],
                        bucket=self.bucket)

    def _check_config(self):
        """Assert the bucket's timeSynchronization setting matches the value
        expected by this test class (self.time_synchronization)."""
        result = self.rest.get_bucket_json(self.bucket)["timeSynchronization"]
        print(result)
        self.assertEqual(
            result,
            self.time_synchronization,
            msg='ERROR, Mismatch on expected time synchronization values')
        self.log.info("Verified results")

    def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):
        """Apply ``ops`` ('set'/'add'/'replace'/'delete'/'expiry'/'touch')
        ``mutations`` times to each of the 10 test keys.

        A None or unrecognized ``ops`` performs no mutation, mirroring the
        original if/elif chain.  ``master``/``bucket`` optionally re-point
        the rest and memcached clients first.
        """
        if master:
            self.rest = RestConnection(master)
        if bucket:
            self.client = VBucketAwareMemcached(self.rest, bucket)

        payload = MemcachedClientHelper.create_value('*', self.value_size)

        for key_num in range(10):
            key = "{0}{1}".format(self.prefix, key_num)
            # One thunk per supported op; the table is rebuilt per key so
            # each lambda captures the current key.
            dispatch = {
                'set': lambda: self.client.memcached(key).set(key, 0, 0, payload),
                'add': lambda: self.client.memcached(key).add(key, 0, 0, payload),
                'replace': lambda: self.client.memcached(key).replace(key, 0, 0, payload),
                'delete': lambda: self.client.memcached(key).delete(key),
                'expiry': lambda: self.client.memcached(key).set(key, self.expire_time, 0, payload),
                'touch': lambda: self.client.memcached(key).touch(key, 10),
            }
            action = dispatch.get(ops)
            for _ in range(mutations):
                if action is not None:
                    action()

        self.log.info("Done with specified {0} ops".format(ops))

    ''' Common function to verify the expected values on cas
    '''

    def _check_cas(self,
                   check_conflict_resolution=False,
                   master=None,
                   bucket=None,
                   time_sync=None):
        self.log.info(' Verifying cas and max cas for the keys')
        if master:
            self.rest = RestConnection(master)
            self.client = VBucketAwareMemcached(self.rest, bucket)

        k = 0

        while k < 10:
            key = "{0}{1}".format(self.prefix, k)
            k += 1
            mc_active = self.client.memcached(key)

            cas = mc_active.getMeta(key)[4]
            max_cas = int(
                mc_active.stats('vbucket-details')
                ['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'])
            #print 'max_cas is {0}'.format(max_cas)
            self.assertTrue(cas == max_cas,
                            '[ERROR]Max cas  is not 0 it is {0}'.format(cas))

            if check_conflict_resolution:
                get_meta_resp = mc_active.getMeta(
                    key, request_extended_meta_data=True)
                if time_sync == 'enabledWithoutDrift':
                    self.assertTrue(
                        get_meta_resp[5] == 1,
                        msg=
                        '[ERROR] Metadata indicate conflict resolution is not set'
                    )
                elif time_sync == 'disabled':
                    self.assertTrue(
                        get_meta_resp[5] == 0,
                        msg=
                        '[ERROR] Metadata indicate conflict resolution is set')