Exemplo n.º 1
0
    def corrupt_cas_is_healed_on_reboot(self):
        """Corrupt the max CAS of the key's vbucket on the active node,
        restart the whole server, and verify the CAS reads back as 0."""
        self.log.info('Start corrupt_cas_is_healed_on_reboot')

        key = 'key1'

        rest_conn = RestConnection(self.master)
        vb_client = VBucketAwareMemcached(rest_conn, 'default')

        # store a document so the key exists
        vb_client.memcached(key).set(key, 0, 0,
                                     json.dumps({'value': 'value1'}))

        # connection to the node holding the active vbucket for this key
        active_mc = vb_client.memcached(key)

        # drive the vbucket max CAS into a corrupt state
        self._corrupt_max_cas(active_mc, key)

        # restart the server; the corrupted CAS should not survive
        shell = RemoteMachineShellConnection(self.master)
        shell.stop_server()
        time.sleep(30)
        shell.start_server()
        time.sleep(30)

        # reconnect and re-resolve the active node after the restart
        rest_conn = RestConnection(self.master)
        vb_client = VBucketAwareMemcached(rest_conn, 'default')
        active_mc = vb_client.memcached(key)

        # getMeta()[4] is the CAS field of the meta response
        healed_cas = active_mc.getMeta(key)[4]
        self.assertTrue(healed_cas == 0, 'max cas after reboot is not 0 it is {0}'.format(healed_cas))
Exemplo n.º 2
0
    def corrupt_cas_is_healed_on_reboot(self):
        """Corrupt the max CAS for a key's vbucket, restart the server and
        compare the CAS observed after reboot against the corrupted value.

        NOTE(review): unlike the sibling variant that asserts max CAS == 0
        after reboot, this version asserts the corrupted CAS value is
        unchanged by the restart.
        """
        self.log.info('Start corrupt_cas_is_healed_on_reboot')

        KEY_NAME = 'key1'

        rest = RestConnection(self.master)

        # set a key
        client = VBucketAwareMemcached(rest, 'default')
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
                                       json.dumps({'value': 'value1'}))
        # client.memcached(KEY_NAME).set('k2', 0, 0,json.dumps({'value':'value2'}))

        # figure out which node it is on
        mc_active = client.memcached(KEY_NAME)

        # set the CAS to -2 and then mutate to increment to -1 and then it should stop there
        self._corrupt_max_cas(mc_active, KEY_NAME)
        # getMeta()[4] is the CAS field of the meta response
        corrupt_cas = mc_active.getMeta(KEY_NAME)[4]

        # self._restart_memcache('default')
        remote = RemoteMachineShellConnection(self.master)
        remote.stop_server()
        time.sleep(30)
        remote.start_server()
        time.sleep(30)

        # re-resolve the active node after the restart
        client = VBucketAwareMemcached(rest, 'default')
        mc_active = client.memcached(KEY_NAME)

        curr_cas = mc_active.getMeta(KEY_NAME)[4]
        self.assertTrue(
            curr_cas == corrupt_cas,
            'Corrupted cas (%s) != curr_cas (%s)' % (corrupt_cas, curr_cas))
Exemplo n.º 3
0
    def test_meta_rebalance_out(self):
        """Verify CAS consistency across a rebalance-out / rebalance-in.

        Mutates a key 10 times, checks the active CAS matches the vbucket's
        max_cas stat, rebalances the master out, reads the replica CAS, adds
        the master back, and asserts replica and active CAS agree.
        """
        KEY_NAME = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # mutate the key so its CAS changes every iteration
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
                                           json.dumps({'value': value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket(vbucket_id)
            # replica connection is reused after the rebalance below
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            # getMeta()[4] is the CAS of the last mutation
            cas_active = mc_active.getMeta(KEY_NAME)[4]

        max_cas = int(
            mc_active.stats('vbucket-details')[
                'vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        # BUGFIX: the original failure message claimed "Max cas is not 0",
        # which did not describe the actual comparison being made.
        self.assertTrue(
            cas_active == max_cas,
            '[ERROR]Active cas {0} does not match max_cas {1}'.format(
                cas_active, max_cas))

        # remove the node holding the active data
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [],
                                                 [self.master])

        rebalance.result()
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        get_meta_resp = mc_replica.getMeta(KEY_NAME,
                                           request_extended_meta_data=False)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:],
                                                 [self.master], [])

        rebalance.result()

        # verify the CAS is good: re-resolve the active node and compare
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]

        self.assertTrue(
            replica_CAS == active_CAS,
            'cas mismatch active: {0} replica {1}'.format(
                active_CAS, replica_CAS))
    def corrupt_cas_is_healed_on_rebalance_out_in(self):
        """Corrupt the max CAS on the active node, rebalance that node out
        and back in, and verify the replica and active CAS values agree
        afterwards (i.e. the corruption is healed by the rebalance)."""

        self.log.info('Start corrupt_cas_is_healed_on_rebalance_out_in')

        KEY_NAME = 'key1'


        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')

        # set a key
        client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,json.dumps({'value':'value1'}))


        # figure out which node it is on
        mc_active = client.memcached(KEY_NAME)
        mc_replica = client.memcached( KEY_NAME, replica_index=0 )


        # set the CAS to -2 and then mutate to increment to -1 and then it should stop there
        self._corrupt_max_cas(mc_active,KEY_NAME)

        # CAS should be 0 now, do some gets and sets to verify that nothing bad happens


        resp = mc_active.get(KEY_NAME)
        self.log.info( 'get for {0} is {1}'.format(KEY_NAME, resp))


        # remove that node
        self.log.info('Remove the node with -1 max cas')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [] ,[self.master])
        #rebalance = self.cluster.async_rebalance([self.master], [], self.servers[-1:])

        rebalance.result()
        # getMeta()[4] is the CAS field of the meta response
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]



        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])
        #rebalance = self.cluster.async_rebalance([self.master], self.servers[-1:],[])

        rebalance.result()


        # verify the CAS is good: re-resolve the active node and compare
        client = VBucketAwareMemcached(rest, 'default')
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]


        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS))
    def test_CASnotzero(self):
        """Verify getMeta returns a non-zero CAS after an append, both with
        and without the HELLO mutation-seqno feature enabled.

        Regression test for MB-31149
        (observe.observeseqnotests.ObserveSeqNoTests.test_CASnotzero).
        """
        self.log.info('Starting test_CASnotzero')

        # without hello(mutationseqencenumber)
        client = VBucketAwareMemcached(RestConnection(self.master), 'default')
        KEY_NAME = "test1key"
        client.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        client.generic_request(
            client.memcached(KEY_NAME).append, 'test1key', 'appended data')
        get_meta_resp = client.generic_request(
            client.memcached(KEY_NAME).getMeta, 'test1key')
        # get_meta_resp[4] is the CAS field of the meta response
        self.log.info(
            'the CAS value without hello(mutationseqencenumber): {} '.format(
                get_meta_resp[4]))
        self.assertNotEqual(get_meta_resp[4], 0)

        # with hello(mutationseqencenumber)
        KEY_NAME = "test2key"
        client.set(KEY_NAME, 0, 0, json.dumps({'value': 'value1'}))
        # FIX: the return value of sendHellos was bound to an unused local;
        # the call is kept only for its protocol side effect.
        client.sendHellos(
            memcacheConstants.PROTOCOL_BINARY_FEATURE_MUTATION_SEQNO)
        client.generic_request(
            client.memcached(KEY_NAME).append, 'test2key', 'appended data456')

        get_meta_resp = client.generic_request(
            client.memcached(KEY_NAME).getMeta, 'test2key')
        self.log.info(
            'the CAS value with hello(mutationseqencenumber): {} '.format(
                get_meta_resp[4]))
        self.assertNotEqual(get_meta_resp[4], 0)
Exemplo n.º 6
0
    def _getr_items(self, item_count, replica_count, prefix, vprefix=""):
        """Read items back from replica vbuckets via getr.

        Args:
          item_count: number of keys per replica index to read.
          replica_count: number of replica copies to read from.
          prefix: key prefix; keys are "<prefix>_key_<i>".
          vprefix: optional value prefix; values are "<vprefix>_value_<i>".

        Returns:
          Number of items successfully read.
        """
        time_start = time.time()
        get_count = 0
        last_error = ""
        error_count = 0
        awareness = VBucketAwareMemcached(self.rest, self.default_bucket_name)
        for r in range(replica_count):
            for i in range(item_count):
                retry = True

                key = prefix + "_key_" + str(i)
                while retry:
                    client = awareness.memcached(key, r)
                    try:
                        # reuse `key` instead of rebuilding the string
                        value = client.getr(key)[2]
                        assert(value == vprefix + "_value_" + str(i))
                        get_count += 1
                        retry = False
                    except mc_bin_client.MemcachedError as e:
                        last_error = "failed to getr key {0}, error: {1}".format(key, e)
                        error_count += 1
                        if e.status == 7:
                            # status 7 (presumably NOT_MY_VBUCKET — confirm):
                            # refresh the vbucket map and retry.
                            # BUGFIX: the log string had a {0} placeholder
                            # with no argument, so it logged a literal "{0}".
                            self.log.info("getting new vbucket map, error: {0}".format(e))
                            awareness.reset(self.rest)
                        else:
                            retry = False
                    except Exception as e:
                        last_error = "failed to getr key {0}, error: {1}".format(key, e)
                        error_count += 1
                        retry = False
        if error_count > 0:
            self.log.error("got {0} errors, last error: {1}".format(error_count, last_error))
        self.log.info("got {0} replica items in {1} seconds".format(get_count, time.time() - time_start))
        awareness.done()
        return get_count
Exemplo n.º 7
0
    def verify_single_node(self, server, kv_store=1):
        """This is the verification function for single node backup.

        Args:
          server: the master server in the cluster as self.master.
          kv_store: default value is 1. This is the key of the kv_store of each bucket.

        If --single-node flag appears in backup command line, we just backup all the items
        from a single node (the master node in this case). For each bucket, we request for the vBucketMap. For every key
        in the kvstore of that bucket, we use hash function to get the vBucketId corresponding to that
        key. By using the vBucketMap, we can know whether that key is in master node or not.
        If yes, keep it. Otherwise delete it."""

        rest = RestConnection(server)
        for bucket in self.buckets:
            VBucketAware = VBucketAwareMemcached(rest, bucket.name)
            memcacheds, vBucketMap, vBucketMapReplica = VBucketAware.request_map(rest, bucket.name)
            valid_keys, deleted_keys = bucket.kvs[kv_store].key_set()
            for key in valid_keys:
                # map the key to the node that owns its active vbucket
                vBucketId = VBucketAware._get_vBucket_id(key)
                which_server = vBucketMap[vBucketId]
                # vBucketMap entries look like "ip:port"; keep only the ip
                sub = which_server.find(":")
                which_server_ip = which_server[:sub]
                if which_server_ip != server.ip:
                    # key is not on the master node: drop it from the kv store
                    partition = bucket.kvs[kv_store].acquire_partition(key)
                    partition.delete(key)
                    bucket.kvs[kv_store].release_partition(key)

        self._verify_all_buckets(server, kv_store, self.wait_timeout * 50, self.max_verify, True, 1)
Exemplo n.º 8
0
    def _verify_es_values(self,
                          src_server,
                          dest_server,
                          kv_store=1,
                          verification_count=10000):
        """Compare document values in Elasticsearch against the source
        Couchbase buckets.

        Args:
          src_server: Couchbase source server.
          dest_server: Elasticsearch destination server.
          kv_store: not referenced in this method; kept for API symmetry.
          verification_count: max number of ES docs to verify per bucket.
        """
        cb_rest = RestConnection(src_server)
        es_rest = RestConnection(dest_server)
        buckets = self.xd_ref._get_cluster_buckets(src_server)
        for bucket in buckets:
            mc = VBucketAwareMemcached(cb_rest, bucket)
            es_valid = es_rest.all_docs(indices=[bucket.name],
                                        size=verification_count)

            # compare values of es documents to documents in couchbase
            for row in es_valid[:verification_count]:
                key = str(row['meta']['id'])

                try:
                    _, _, doc = mc.get(key)
                    # only the 'site_name' field is compared
                    val_src = str(json.loads(doc)['site_name'])
                    val_dest = str(row['doc']['site_name'])
                    if val_src != val_dest:
                        self.xd_ref.fail("Document %s has unexpected value (%s) expected (%s)" % \
                                        (key, val_src, val_dest))
                except MemcachedError as e:
                    # key indexed by ES but missing in couchbase
                    self.xd_ref.fail(
                        "Error during verification.  Index contains invalid key: %s"
                        % key)

            self._log.info("Verified doc values in couchbase bucket (%s) match values in elastic search" % \
                                 (bucket.name))
Exemplo n.º 9
0
    def run_load(rest, bucket, task, kv_store):
        """Load the documents described by task["docs"] into the bucket.

        NOTE(review): json.loads(..., encoding=...) plus .encode() of the
        decoded id suggests Python 2; the `encoding` keyword was removed
        from json.loads in Python 3.9 — confirm the target interpreter.
        """
        smart = VBucketAwareMemcached(rest, bucket)
        docs_iterators = task["docs"]
        # decode which operations (sets/gets/deletes/expiration) to perform
        do_sets, do_gets, do_deletes, do_with_expiration = RebalanceDataGenerator.decode_ops(
            task)
        doc_ids = []
        expiration = 0
        if do_with_expiration:
            expiration = task["expiration"]

        for docs_iterator in docs_iterators:
            for value in docs_iterator:
                _value = value.encode("ascii", "ignore")
                _json = json.loads(_value, encoding="utf-8")
                _id = _json["meta"]["id"].encode("ascii", "ignore")
                _value = json.dumps(_json["json"]).encode("ascii", "ignore")
                #                    _value = json.dumps(_json)
                try:
                    RebalanceDataGenerator.do_mc(rest, smart, _id, _value,
                                                 kv_store, doc_ids, do_sets,
                                                 do_gets, do_deletes,
                                                 do_with_expiration,
                                                 expiration)
                except:
                    # best-effort load: log the failure and continue
                    traceback.print_exc(file=sys.stdout)
                    #post the results into the queue
        return
Exemplo n.º 10
0
    def test_capi_with_checkpointing(self):
        """Pause replication, load docs, resume, then fetch and log the most
        recent XDCR checkpoint record from the node owning active vb0.

        NOTE(review): uses xrange, so this variant is Python 2 only.
        """
        repl_id = self._start_es_replication(xdcr_params={"checkpointInterval":"60"})

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        # allow at least one checkpoint interval (60s above) to elapse
        self.sleep(120)

        vb0_node = None
        nodes = self.src_cluster.get_nodes()
        # vBucketMap entries are "ip:port"; find the node holding active vb0
        ip = VBucketAwareMemcached(rest_conn,'default').vBucketMap[0].split(':')[0]
        for node in nodes:
            if ip == node.ip:
                vb0_node = node
        if not vb0_node:
            raise XDCRCheckpointException("Error determining the node containing active vb0")
        vb0_conn = RestConnection(vb0_node)
        try:
            checkpoint_record = vb0_conn.get_recent_xdcr_vb_ckpt(repl_id)
            self.log.info("Checkpoint record : {0}".format(checkpoint_record))
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {0}".format(e))

        self._verify_es_results()
Exemplo n.º 11
0
    def test_capi_with_checkpointing(self):
        """Pause replication, load docs, resume and wait for catch-up, then
        fetch and log the most recent XDCR checkpoint record from the node
        owning active vb0."""
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}', range(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()
        self._wait_for_replication_to_catchup()
        # allow time for a checkpoint to be recorded
        self.sleep(120)

        vb0_node = None
        nodes = self.src_cluster.get_nodes()
        # vBucketMap entries are "ip:port"; find the node holding active vb0
        ip = VBucketAwareMemcached(self.rest, 'default').vBucketMap[0].split(':')[0]
        for node in nodes:
            if ip == node.ip:
                vb0_node = node
        if not vb0_node:
            raise XDCRCheckpointException("Error determining the node containing active vb0")
        vb0_conn = RestConnection(vb0_node)
        try:
            repl = vb0_conn.get_replication_for_buckets('default', 'default')
            checkpoint_record = vb0_conn.get_recent_xdcr_vb_ckpt(repl['id'])
            self.log.info("Checkpoint record : {0}".format(checkpoint_record))
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {0}".format(e))

        self._verify_es_results()
Exemplo n.º 12
0
    def _check_cas(self, check_conflict_resolution=False, master=None, bucket=None, time_sync=None):
        """Verify each key's CAS matches its vbucket's max_cas and, when
        requested, that extended metadata reflects the time-sync setting.

        Args:
          check_conflict_resolution: also check getMeta extended metadata.
          master: optional server; when given, reconnect to it.
          bucket: bucket to use when reconnecting.
          time_sync: 'enabledWithoutDrift' or 'disabled'; only consulted when
            check_conflict_resolution is True.
        """
        self.log.info(' Verifying cas and max cas for the keys')
        #select_count = 20 #Verifying top 20 keys
        if master:
            self.rest = RestConnection(master)
            self.client = VBucketAwareMemcached(self.rest, bucket)

        k=0

        while k < self.items:
            key = "{0}{1}".format(self.prefix, k)
            k += 1
            mc_active = self.client.memcached(key)

            # getMeta()[4] is the CAS; compare against the vbucket max_cas stat
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            #print 'max_cas is {0}'.format(max_cas)
            self.assertTrue(cas == max_cas, '[ERROR]Max cas  is not 0 it is {0}'.format(cas))

            if check_conflict_resolution:
                # get_meta_resp[5] carries the conflict-resolution flag
                get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)
                if time_sync == 'enabledWithoutDrift':
                    self.assertTrue( get_meta_resp[5] == 1, msg='[ERROR] Metadata indicate conflict resolution is not set')
                elif time_sync == 'disabled':
                    self.assertTrue( get_meta_resp[5] == 0, msg='[ERROR] Metadata indicate conflict resolution is set')
Exemplo n.º 13
0
    def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):
        """Run the mutation named by ``ops`` against every key
        "<prefix><0..items-1>", repeating it ``mutations`` times per key.

        Supported ops: 'set', 'add', 'replace', 'delete', 'expiry', 'touch'.
        Reconnects first when master and/or bucket are supplied.
        """
        if master:
            self.rest = RestConnection(master)
        if bucket:
            self.client = VBucketAwareMemcached(self.rest, bucket)

        payload = MemcachedClientHelper.create_value('*', self.value_size)

        for item_idx in range(self.items):
            doc_key = "{0}{1}".format(self.prefix, item_idx)
            for _ in range(mutations):
                if ops == 'set':
                    self.client.memcached(doc_key).set(doc_key, 0, 0, payload)
                elif ops == 'add':
                    self.client.memcached(doc_key).add(doc_key, 0, 0, payload)
                elif ops == 'replace':
                    self.client.memcached(doc_key).replace(doc_key, 0, 0, payload)
                elif ops == 'delete':
                    self.client.memcached(doc_key).delete(doc_key)
                elif ops == 'expiry':
                    # a set carrying an expiration time
                    self.client.memcached(doc_key).set(doc_key, self.expire_time, 0, payload)
                elif ops == 'touch':
                    self.client.memcached(doc_key).touch(doc_key, 10)

        self.log.info("Done with specified {0} ops".format(ops))
Exemplo n.º 14
0
    def do_get_random_key(self):
        """Regression test for MB-31548 (get_random_key sometimes hangs):
        seed 1000 docs, then issue 1,000,000 get_random_key requests."""
        # MB-31548, get_Random key gets hung sometimes.
        self.log.info("Creating few docs in the bucket")
        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        key = "test_docs-"
        for index in range(1000):
            doc_key = key + str(index)
            client.memcached(doc_key).set(doc_key, 0, 0,
                                          json.dumps({'value': 'value1'}))

        self.log.info("Performing random_gets")
        # direct memcached connection (port 11210) for the random-key calls
        mc = MemcachedClient(self.master.ip, 11210)
        mc.sasl_auth_plain(self.master.rest_username,
                           self.master.rest_password)
        mc.bucket_select('default')

        count = 0
        while (count < 1000000):
            count = count + 1
            try:
                mc.get_random_key()
            except MemcachedError as error:
                # NOTE(review): relies on MemcachedError exposing .message —
                # confirm this attribute exists under Python 3
                self.fail("<MemcachedError #%d ``%s''>" %
                          (error.status, error.message))
            if count % 1000 == 0:
                self.log.info('The number of iteration is {}'.format(count))
Exemplo n.º 15
0
    def test_full_eviction_changed_to_value_eviction(self):
        """Switch a loaded bucket's eviction policy to valueOnly via the CLI,
        then verify a subsequent set raises MemcachedError."""

        KEY_NAME = 'key1'

        # gen_create2 is only referenced by the commented-out load below
        gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
        gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)

        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])
        remote = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            # flip the eviction policy through couchbase-cli
            output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                     cluster_host="localhost:8091",
                                                     user=self.master.rest_username,
                                                     password=self.master.rest_password,
                                                     options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
            self.assertTrue(' '.join(output).find('SUCCESS') != -1, 'Eviction policy wasn\'t changed')
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            self.servers[:self.nodes_init], self,
            wait_time=self.wait_timeout, wait_if_warmup=True)
        self.sleep(10, 'Wait some time before next load')
        # self._load_all_buckets(self.master, gen_create2, "create", 0)

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        mcd = client.memcached(KEY_NAME)
        try:
            rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
            self.fail('Bucket is incorrectly functional')
        except MemcachedError as e:
            pass  # this is the exception we are hoping for
Exemplo n.º 16
0
    def key_not_exists_test(self):
        """Repeatedly set then delete a key and verify both get and replace
        on the deleted key fail with ERR_NOT_FOUND."""
        self.assertTrue(len(self.buckets) > 0, 'at least 1 bucket required')
        bucket = self.buckets[0].name
        client = VBucketAwareMemcached(RestConnection(self.master), bucket)
        KEY_NAME = 'key'

        for i in range(1500):
            client.set(KEY_NAME, 0, 0, "x")
            # delete and verify get fails
            client.delete(KEY_NAME)
            err = None
            try:
                rc = client.get(KEY_NAME)
            except MemcachedError as error:
                # It is expected to raise MemcachedError because the key is deleted.
                err = error.status
            self.assertTrue(err == ERR_NOT_FOUND,
                            'expected key to be deleted {0}'.format(KEY_NAME))

            #cas errors do not sleep the test for 10 seconds, plus we need to check that the correct
            #error is being thrown
            err = None
            try:
                #For some reason replace instead of cas would not reproduce the bug
                mc_active = client.memcached(KEY_NAME)
                mc_active.replace(KEY_NAME, 0, 10, "value")
            except MemcachedError as error:
                err = error.status
            self.assertTrue(
                err == ERR_NOT_FOUND,
                'was able to replace cas on removed key {0}'.format(KEY_NAME))
Exemplo n.º 17
0
 def do_verification(kv_store, rest, bucket):
     """Validate every key in kv_store against the live bucket contents.

     Returns a dict mapping each failing key to a one-element list naming
     the failure: "deleted key", "expired key", "value mismatch" or
     "key not found".
     """
     smart = VBucketAwareMemcached(rest, bucket)
     validation_failures = {}
     for key in kv_store.keys():
         expected = kv_store.read(key)
         if not expected:
             continue
         status = expected["status"]
         if status in ("deleted", "expired"):
             # the key must be gone; a successful get is a failure
             try:
                 smart.memcached(key).get(key)
                 validation_failures[key] = [status + " key"]
             except MemcachedError:
                 pass
         else:
             # the key must exist with the expected value (md5 digest)
             try:
                 _, _, value = smart.memcached(key).get(key)
                 if hashlib.md5(value).digest() != expected["value"]:
                     validation_failures[key] = ["value mismatch"]
             except MemcachedError:
                 validation_failures[key] = ["key not found"]
     return validation_failures
Exemplo n.º 18
0
    def verify_one_node_has_time_sync_and_one_does_not(self):
        """Enable the time-drift counter on the active node only, mutate the
        test key, and check the extended metadata reports conflict
        resolution as set."""

        # need to explicitly enable and disable sync when it is supported
        self.log.info(
            '\n\nStarting verify_one_node_has_time_sync_and_one_does_not')

        client = VBucketAwareMemcached(RestConnection(self.master), 'default')

        vbucket_id = client._get_vBucket_id(LWW_EP_Engine.TEST_KEY)
        mc_master = client.memcached_for_vbucket(vbucket_id)
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        # set for master but not for the replica
        result = mc_master.set_time_drift_counter_state(vbucket_id, 0, 1)

        mc_master.set(LWW_EP_Engine.TEST_KEY, 0, 0, LWW_EP_Engine.TEST_VALUE)

        # get_meta_resp[5] carries the conflict-resolution flag
        get_meta_resp = mc_master.getMeta(LWW_EP_Engine.TEST_KEY,
                                          request_extended_meta_data=True)

        self.assertTrue(
            get_meta_resp[5] == 1,
            msg='Metadata indicates conflict resolution is not set')

        self.log.info(
            '\n\nEnding verify_one_node_has_time_sync_and_one_does_not')
Exemplo n.º 19
0
 def insert_docs(self,
                 num_of_docs,
                 prefix='doc',
                 extra_values={},
                 return_docs=False,
                 scope=None,
                 collection=None):
     """Insert ``num_of_docs`` random geo/tree JSON documents.

     Args:
       num_of_docs: number of documents to create, keyed "<prefix>-<i>".
       prefix: document key prefix.
       extra_values: extra fields merged into every document.
         NOTE(review): mutable default argument — harmless here only
         because it is never mutated.
       return_docs: when True return the document dicts; else the names.
       scope/collection: optional collection coordinates passed to set().

     Returns:
       List of doc names (or full document dicts when return_docs is True).
     """
     random.seed(12345)
     rest = RestConnection(self.master)
     smart = VBucketAwareMemcached(rest, self.bucket)
     doc_names = []
     for i in range(0, num_of_docs):
         key = doc_name = "{0}-{1}".format(prefix, i)
         geom = {
             "type":
             "Point",
             "coordinates":
             [random.randrange(-180, 180),
              random.randrange(-90, 90)]
         }
         value = {
             "name": doc_name,
             "age": random.randrange(1, 1000),
             "geometry": geom,
             "height": random.randrange(1, 13000),
             "bloom": random.randrange(1, 6),
             "shed_leaves": random.randrange(6, 13)
         }
         value.update(extra_values)
         if not return_docs:
             doc_names.append(doc_name)
         else:
             doc_names.append(value)
         # loop till value is set
         fail_count = 0
         while True:
             try:
                 smart.set(key,
                           0,
                           0,
                           json.dumps(value),
                           scope=scope,
                           collection=collection)
                 break
             except MemcachedError as e:
                 fail_count += 1
                 # statuses 132/133/134 are treated as transient; retry up
                 # to 60 attempts, waiting longer on the very first doc
                 if (e.status == 133 or e.status == 132
                         or e.status == 134) and fail_count < 60:
                     if i == 0:
                         self.log.error(
                             "waiting 5 seconds. error {0}".format(e))
                         time.sleep(5)
                     else:
                         self.log.error(e)
                         time.sleep(1)
                 else:
                     raise e
     self.log.info("Inserted {0} json documents".format(num_of_docs))
     return doc_names
Exemplo n.º 20
0
 def insert_key(serverInfo, bucket_name, count, size):
     """Insert count*1000 JSON documents ("key_<i>") whose value payload is
     roughly ``size`` bytes.

     NOTE(review): Python 2 only (uses xrange).
     """
     rest = RestConnection(serverInfo)
     smart = VBucketAwareMemcached(rest, bucket_name)
     for i in xrange(count * 1000):
         key = "key_" + str(i)
         # flag is generated but never passed to the set() call below
         flag = random.randint(1, 999)
         value = {"value": MemcachedClientHelper.create_value("*", size)}
         smart.memcached(key).set(key, 0, 0, json.dumps(value))
Exemplo n.º 21
0
 def _poxi(self):
     """Build a VBucketAwareMemcached client for self.bucket_name against
     the configured server (port 8091, masked credentials)."""
     server = TestInputServer()
     server.ip = self.server_ip
     server.rest_username = "******"
     server.rest_password = "******"
     server.port = 8091
     return VBucketAwareMemcached(RestConnection(server), self.bucket_name)
Exemplo n.º 22
0
    def verify_vBuckets_info(master, bucket="default"):
        '''
        Verify vBuckets' state and items count (for active/replica copies)
        against the vBucketMap for all nodes in the cluster.

        Returns True when everything is consistent, False otherwise
        (discrepancies are logged).
        '''
        log = logging.getLogger("infra")
        awareness = VBucketAwareMemcached(RestConnection(master), bucket)
        vb_map = awareness.vBucketMap
        vb_mapReplica = awareness.vBucketMapReplica
        replica_num = len(vb_mapReplica[0])

        # get state and item count of every vbucket on each node
        node_stats = RebalanceHelper.get_vBuckets_info(master)
        state = True
        # iterate through all vbuckets by their numbers
        for num in vb_map:
            # verify that the active vbucket in memcached is also active in stats("hash")
            if (node_stats[vb_map[num]]["vb_" + str(num)][0] != "active"):
                # BUGFIX: format index was {3} with only three arguments,
                # which raised IndexError instead of logging.
                log.info("vBucket {0} in {1} node has wrong state {2}".format(
                    "vb_" + str(num), vb_map[num],
                    node_stats[vb_map[num]]["vb_" + str(num)]))
                state = False
            # number of active items for this vbucket
            vb = node_stats[vb_map[num]]["vb_" + str(num)][1]
            active_vb = vb_map[num]
            # list of nodes for which this vbucket is a replica
            # BUGFIX: original indexed with the undefined name `key`
            replica_vbs = vb_mapReplica[num]
            sum_items_replica = 0
            # sum of replica items across all nodes for this vbucket
            for i in range(replica_num):
                if (node_stats[vb_mapReplica[num][i]]["vb_" + str(num)][0] !=
                        "replica"):
                    # BUGFIX: same {3} format-index error as above, and the
                    # stats dict was indexed with the node list instead of
                    # the i-th node.
                    log.info(
                        "vBucket {0} in {1} node has wrong state {2}".format(
                            "vb_" + str(num), vb_mapReplica[num][i],
                            node_stats[vb_mapReplica[num][i]]["vb_" + str(num)]))
                    state = False
                sum_items_replica += int(
                    node_stats[replica_vbs[i]]["vb_" + str(num)][1])
            # report any discrepancy between active and replica item counts
            if (int(vb) * len(vb_mapReplica[num]) != sum_items_replica):
                log.info(
                    "sum of active items doesn't correspond to replica's vBucets in {0} vBucket:"
                    .format("vb_" + str(num)))
                log.info("items in active vBucket {0}:{1}".format(
                    vb_map[num], node_stats[vb_map[num]]["vb_" + str(num)]))
                # BUGFIX: original iterated range(replica) with `replica`
                # undefined; the replica count is replica_num.
                for j in range(replica_num):
                    log.info("items in replica vBucket {0}: {1}".format(
                        vb_mapReplica[num][j],
                        node_stats[vb_mapReplica[num][j]]["vb_" + str(num)]))
                    log.info(node_stats[vb_mapReplica[num][0]])
                state = False

        if not state:
            log.error("Something is wrong, see log above. See details:")
            log.error("vBucetMap: {0}".format(vb_map))
            log.error("vBucetReplicaMap: {0}".format(vb_mapReplica))
            log.error("node_stats: {0}".format(node_stats))
        return state
Exemplo n.º 23
0
    def test_meta_hard_restart(self):
        """Verify that a key's CAS survives a hard server reboot.

        Mutates a key several times, checks that the CAS of the last
        mutation matches the vbucket's max_cas, reboots the server(s),
        then asserts the CAS read after restart equals the CAS recorded
        before it.
        """
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        cas_pre = 0
        for i in range(10):
            # each set bumps the CAS; remember the CAS of the last mutation
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
                                           json.dumps({'value': value}))
            mc_active = client.memcached(KEY_NAME)
            cas_pre = mc_active.getMeta(KEY_NAME)[4]

        # the vbucket's max_cas must equal the CAS of the latest mutation
        max_cas = int(
            mc_active.stats('vbucket-details')[
                'vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'])

        # original message claimed "max cas is not 0", but the check is a
        # comparison between the last mutation's CAS and the vbucket max_cas
        self.assertTrue(
            cas_pre == max_cas,
            'CAS of last mutation {0} does not match vbucket max_cas {1}'
            .format(cas_pre, max_cas))

        # reboot nodes
        self._reboot_server()

        time.sleep(60)
        # verify the CAS is unchanged after the hard restart
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]

        # also exercise getMeta without extended meta data after restart
        mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)

        self.assertTrue(
            cas_pre == cas_post,
            'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post))
Exemplo n.º 24
0
 def setUp(self):
     """Initialize CAS-ops test fixtures and a vbucket-aware client."""
     super(OpsChangeCasTests, self).setUp()
     self.prefix = "test_"
     # tunable test parameters with their defaults
     for name, default in (("expire_time", 35),
                           ("item_flag", 0),
                           ("value_size", 256),
                           ("items", 20)):
         setattr(self, name, self.input.param(name, default))
     self.rest = RestConnection(self.master)
     self.client = VBucketAwareMemcached(self.rest, self.bucket)
Exemplo n.º 25
0
 def get_active_vb0_node(self, master):
     """Return the cluster node hosting the active copy of vbucket 0.

     Looks up the ip for vbucket 0 via the vBucketMap of *master* and
     matches it against the source or destination node list, depending
     on which side of the XDCR setup *master* belongs to.
     """
     active_ip = VBucketAwareMemcached(RestConnection(master), 'default').vBucketMap[0].split(':')[0]
     nodes = self.dest_nodes if master == self.dest_master else self.src_nodes
     match = next((node for node in nodes if node.ip == active_ip), None)
     if match is None:
         raise XDCRCheckpointException("Error determining the node containing active vb0")
     return match
Exemplo n.º 26
0
    def test_MB_32114(self):
        """Regression test for MB-32114: del_with_meta on keys carrying a
        system xattr must leave active/replica item counts consistent
        after bucket compaction (and optional TTL expiry).
        """
        # SDK client module name differs between SDK generations; fall back
        try:
            from sdk_client import SDKClient
        except:
            from sdk_client3 import SDKClient
        import couchbase.subdocument as SD

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        if self.maxttl:
            self._expiry_pager(self.master)
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.master.ip],
                               bucket='default')
        KEY_NAME = 'key1'

        # create 1000 keys, attach an xattr to each, and wait until persisted
        for i in range(1000):
            mcd = client.memcached(KEY_NAME + str(i))
            rc = mcd.set(KEY_NAME + str(i), 0, 0,
                         json.dumps({'value': 'value2'}))
            sdk_client.mutate_in(
                KEY_NAME + str(i),
                SD.upsert("subdoc_key",
                          "subdoc_val",
                          xattr=True,
                          create_parents=True))
            # wait for it to persist
            persisted = 0
            while persisted == 0:
                opaque, rep_time, persist_time, persisted, cas = client.observe(
                    KEY_NAME + str(i))

        start_time = time.time()
        self._load_doc_data_all_buckets(batch_size=1000)
        end_time = time.time()

        # delete every key with meta, using a CAS one above the stored CAS
        for i in range(1000):
            try:
                mcd = client.memcached(KEY_NAME + str(i))
                # NOTE(review): 'exp' is the expiry field from getMeta and is
                # shadowed by the exception name below -- confirm intended
                _, flags, exp, seqno, cas = client.memcached(
                    KEY_NAME + str(i)).getMeta(KEY_NAME + str(i))
                rc = mcd.del_with_meta(KEY_NAME + str(i), 0, 0, 2, cas + 1)
            except MemcachedError as exp:
                self.fail("Exception with del_with meta - {0}".format(exp))
        self.cluster.compact_bucket(self.master, "default")
        if self.maxttl:
            # sleep out the remainder of the TTL window (plus slack) so all
            # documents have expired before the item counts are compared
            time_to_sleep = (self.maxttl - (end_time - start_time)) + 20
            self.sleep(int(time_to_sleep))
        else:
            self.sleep(60)
        # replica count must be active count times the replica factor
        active_bucket_items = rest.get_active_key_count("default")
        replica_bucket_items = rest.get_replica_key_count("default")
        print('active_bucket_items ', active_bucket_items)
        print('replica_bucket_items ', replica_bucket_items)
        if active_bucket_items * self.num_replicas != replica_bucket_items:
            self.fail("Mismatch in data !!!")
Exemplo n.º 27
0
    def test_rollback_and_persistence_race_condition(self):
        """Race-condition check: stop persistence, load extra items, kill
        memcached on one node, and verify that after rollback both nodes
        report the same total item count.
        """
        nodeA = self.servers[0]
        vbucket_client = VBucketAwareMemcached(RestConnection(self.master),
                                               'default')
        gen_create = BlobGenerator('dcp',
                                   'dcp-',
                                   64,
                                   start=0,
                                   end=self.num_items)
        self._load_all_buckets(nodeA, gen_create, "create", 0)

        # stop persistence on all nodes so the second load stays memory-only
        for bucket in self.buckets:
            for s in self.servers[:self.nodes_init]:
                client = MemcachedClientHelper.direct_client(s, bucket)
                try:
                    client.stop_persistence()
                except MemcachedError as e:
                    if self.bucket_type == 'ephemeral':
                        # ephemeral buckets have no flusher, so this error is
                        # expected and the test is a no-op.
                        # FIX: exceptions have no .message attribute on
                        # Python 3 -- use str(e) instead.
                        self.assertTrue(
                            "Memcached error #4 'Invalid':  Flusher not running. for vbucket :0 to mc "
                            in str(e))
                        return
                    else:
                        raise

        vb_uuid, seqno, high_seqno = self.vb_info(self.servers[0], 5)

        time.sleep(10)

        # more (non-intersecting) load
        gen_create = BlobGenerator('dcp-secondgroup',
                                   'dcpsecondgroup-',
                                   64,
                                   start=0,
                                   end=self.num_items)
        self._load_all_buckets(nodeA, gen_create, "create", 0)

        # hard-kill memcached on the first node to force a rollback of the
        # unpersisted items
        shell = RemoteMachineShellConnection(self.servers[0])
        shell.kill_memcached()

        time.sleep(10)

        mc1 = MemcachedClientHelper.direct_client(self.servers[0], "default")
        mc2 = MemcachedClientHelper.direct_client(self.servers[1], "default")

        node1_items = mc1.stats()["curr_items_tot"]
        node2_items = mc2.stats()["curr_items_tot"]

        self.assertTrue(
            node1_items == node2_items,
            'Node items not equal. Node 1:{0}, node 2:{1}'.format(
                node1_items, node2_items))
Exemplo n.º 28
0
 def connect_host_port(self, host, port, user, pswd):
     """Open a vbucket-aware connection to host:port.

     Falls back to the stock 'Administrator'/'password' credentials when
     user/pswd are empty, and resets the transfer bookkeeping counters.
     """
     from membase.api.rest_client import RestConnection
     from memcached.helper.data_helper import VBucketAwareMemcached
     server_info = {
         "ip": host,
         "port": port,
         "username": user or "Administrator",
         "password": pswd or "password",
     }
     rest_conn = RestConnection(server_info)
     self.awareness = VBucketAwareMemcached(rest_conn,
                                            user or "default",
                                            server_info)
     # fresh connection: clear backoff and transfer counters
     self.backoff = 0
     self.xfer_sent = 0
     self.xfer_recv = 0
Exemplo n.º 29
0
 def _verify_es_results(self, bucket='default'):
     """Cross-check every Elasticsearch document against Couchbase.

     Fetches all docs from ES and asserts each one is dict-equal to the
     JSON value stored under the same id in the given Couchbase bucket.
     """
     es_docs = self.esrest_conn.all_docs()
     self.log.info("Retrieved ES Docs")
     memcached_conn = VBucketAwareMemcached(self.rest, bucket)
     self.log.info("Comparing CB and ES data")
     for doc in es_docs:
         es_data = doc['doc']
         doc_id = str(es_data['_id'])
         mc_active = memcached_conn.memcached(doc_id)
         cb_flags, cb_cas, cb_data = mc_active.get(doc_id)
         self.assertDictEqual(
             es_data, json.loads(cb_data),
             "Data mismatch found - es data: {0} cb data: {1}".format(
                 str(es_data), str(cb_data)))
     self.log.info("Data verified")
Exemplo n.º 30
0
 def _reader_thread(self, inserted_keys, bucket_data, moxi=False):
     errors = []
     rest = RestConnection(self._servers[0])
     smartclient = None
     for name in bucket_data:
         for key in inserted_keys:
             if moxi:
                 moxi = MemcachedClientHelper.proxy_client(self._servers[0], name)
             else:
                 smartclient = VBucketAwareMemcached(rest, name)
             try:
                 if moxi:
                     moxi.get(key)
                 else:
                     smartclient.memcached(key).get(key)
             except Exception as ex:
                 errors.append({"error": ex, "key": key})
                 self.log.info(ex)
                 if not moxi:
                     smartclient.done()
                     smartclient = VBucketAwareMemcached(rest, name)